detect_window.c
/*
 * Copyright (C) 2015
 *
 * This file is part of Paparazzi.
 *
 * Paparazzi is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * Paparazzi is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with paparazzi; see the file COPYING. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */

/**
 * @file detect_window.c
 * Detect a bright region surrounded by dark or vice versa - sometimes this corresponds to a window.
 */

#define RES 100
#define N_WINDOW_SIZES 1

#include "cv.h"
#include "detect_window.h"
#include <stdio.h>

/** Default FPS (zero means run at camera fps) */
#ifndef DETECT_WINDOW_FPS
#define DETECT_WINDOW_FPS 0
#endif
PRINT_CONFIG_VAR(DETECT_WINDOW_FPS)

void detect_window_init(void)
{
#ifdef DETECT_WINDOW_CAMERA
  cv_add_to_device(&DETECT_WINDOW_CAMERA, detect_window, DETECT_WINDOW_FPS, 0);
#else
#warning "DETECT_WINDOW_CAMERA not defined, CV callback not added to device"
#endif
}
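
/*
 * Note: DETECT_WINDOW_CAMERA is typically supplied by the build configuration
 * as a define naming one of the board's video_config_t devices; without it the
 * module still compiles, but no frames are ever delivered to detect_window().
 */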

struct image_t *detect_window(struct image_t *img, uint8_t camera_id)
{

  uint16_t coordinate[2];
  coordinate[0] = 0; coordinate[1] = 0;
  uint16_t response = 0;
  uint32_t integral_image[img->w * img->h];
  struct image_t gray;
  image_create(&gray, img->w, img->h, IMAGE_GRAYSCALE);
  image_to_grayscale(img, &gray);

  response = detect_window_sizes((uint8_t *)gray.buf, (uint32_t)img->w, (uint32_t)img->h, coordinate, integral_image,
                                 MODE_BRIGHT);
  printf("Coordinate: %d, %d\n", coordinate[0], coordinate[1]);
  printf("Response = %d\n", response);

  image_free(&gray);
  return NULL; // No new image was created
}


uint16_t detect_window_sizes(uint8_t *in, uint32_t image_width, uint32_t image_height, uint16_t *coordinate,
                             uint32_t *integral_image, uint8_t MODE)
{
  // whether to calculate the integral image (only do once):
  uint8_t calculate_integral_image = 1;
  uint16_t sizes[N_WINDOW_SIZES];
  uint16_t best_response[N_WINDOW_SIZES];
  uint16_t best_index = 0;
  uint16_t best_xc = 0;
  uint16_t best_yc = 0;
  uint16_t s = 0;
  sizes[0] = 100; //sizes[1] = 40; sizes[2] = 50; sizes[3] = 60;

  for (s = 0; s < N_WINDOW_SIZES; s++) {

    // coordinate will contain the best location; best_response is the match quality scaled by RES (100)
    calculate_integral_image = (s == 0); // only calculate the integral image for the first window size
    best_response[s] = detect_window_one_size(in, image_width, image_height, coordinate, &sizes[s],
                                              calculate_integral_image, integral_image, MODE);
    if (s == 0 || best_response[s] < best_response[best_index]) {
      best_index = s;
      best_xc = coordinate[0];
      best_yc = coordinate[1];
    }
  }

  coordinate[0] = best_xc;
  coordinate[1] = best_yc;
  return best_response[best_index];
}
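
/*
 * Responses returned by detect_window_one_size() are ratios scaled by RES (100);
 * a lower value indicates a stronger window-like contrast, which is why the loop
 * above keeps the minimum over all candidate window sizes.
 */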

uint16_t detect_window_one_size(uint8_t *in, uint32_t image_width, uint32_t image_height, uint16_t *coordinate,
                                uint16_t *size, uint8_t calculate_integral_image, uint32_t *integral_image, uint8_t MODE)
{
  /*
   * Steps:
   * (0) filter out the bad pixels (i.e., those lower than 4) and replace them with a disparity of 6
   *     (see filter_bad_pixels(); not performed in this function)
   * (1) get integral image (if calculate_integral_image == 1)
   * (2) determine responses per location while determining the best-matching location (put it in coordinate)
   */

  // output of the function:
  uint16_t min_response = RES;

  // parameters:
  //uint16_t image_border = 10;
  uint16_t window_size, border_size, feature_size, px_whole, px_inner, px_border, px_outer;
  uint16_t relative_border = 15; // border in percentage of window size

  // declaration of other vars:
  uint16_t x, y;
  uint32_t response;

  // (1) get integral image (if calculate_integral_image == 1)

  if (calculate_integral_image) {
    get_integral_image(in, image_width, image_height, integral_image);
  }

  // window size is without border, feature size is with border:
  window_size = (*size);
  border_size = (relative_border * window_size) / 100; // percentage
  feature_size = window_size + 2 * border_size;
  px_inner = feature_size - 2 * border_size;
  px_inner = px_inner * px_inner;
  px_whole = feature_size * feature_size;
  px_border = px_whole - px_inner;
  px_outer = border_size * window_size;

  // (2) determine a response map for that size
  for (x = 0; x < image_width - feature_size; x++) {
    for (y = 0; y < image_height - feature_size; y++) {
      response = get_window_response(x, y, feature_size, border_size, integral_image, image_width, image_height, px_inner,
                                     px_border, MODE);

      if (response < RES) {
        if (MODE == MODE_DARK) {
          // the inside is further away than the outside, perform the border test:
          response = get_border_response(x, y, feature_size, window_size, border_size, integral_image, image_width, image_height,
                                         px_inner, px_outer);
        }

        if (response < min_response) {
          coordinate[0] = x;
          coordinate[1] = y;
          min_response = response;
        }
      } else {
        //in[x+y*image_width] = 255;

      }
    }
  }

  // the coordinate is at the top left corner of the feature,
  // the center of the window is then at:
  coordinate[0] += feature_size / 2;
  coordinate[1] += feature_size / 2;

  return min_response;
}

// this function can help if the window is not visible anymore:
uint16_t detect_escape(uint8_t *in __attribute__((unused)), uint32_t image_width, uint32_t image_height, uint16_t *escape_coordinate,
                       uint32_t *integral_image, uint8_t n_cells)
{
  uint16_t c, r, min_c = 0, min_r = 0;
  uint16_t cell_width, cell_height;
  uint32_t min_avg = 10000;
  uint32_t avg;
  uint16_t border = 10;
  cell_width = (image_width - 2 * border) / n_cells;
  cell_height = (image_height - 2 * border) / n_cells;
  // Get the average disparities of all cells in a grid:
  for (c = 0; c < n_cells; c++) {
    for (r = 0; r < n_cells; r++) {
      avg = get_avg_disparity(c * cell_width + border, r * cell_height + border, (c + 1) * cell_width + border,
                              (r + 1) * cell_height + border, integral_image, image_width, image_height);
      if (avg < min_avg) {
        min_avg = avg;
        min_c = c;
        min_r = r;
      }
    }
  }
  // return coordinates for the best option:
  if (min_avg == 10000) {
    escape_coordinate[0] = image_width / 2;
    escape_coordinate[1] = image_height / 2;
  } else {
    escape_coordinate[0] = min_c * cell_width + border + cell_width / 2;
    escape_coordinate[1] = min_r * cell_height + border + cell_height / 2;
  }

  return min_avg;
}

void get_integral_image(uint8_t *in, uint32_t image_width, uint32_t image_height, uint32_t *integral_image)
{
  uint16_t x, y;
  for (x = 0; x < image_width; x++) {
    for (y = 0; y < image_height; y++) {
      if (x >= 1 && y >= 1) {
        integral_image[x + y * image_width] = (uint32_t) in[x + y * image_width] + integral_image[x - 1 + y * image_width] +
                                              integral_image[x + (y - 1) * image_width] - integral_image[x - 1 + (y - 1) * image_width];
      } else if (x >= 1) {
        integral_image[x + y * image_width] = (uint32_t) in[x + y * image_width] + integral_image[x - 1 + y * image_width];
      } else if (y >= 1) {
        integral_image[x + y * image_width] = (uint32_t) in[x + y * image_width] + integral_image[x + (y - 1) * image_width];
      } else {
        integral_image[x + y * image_width] = (uint32_t) in[x + y * image_width];
      }
    }
  }
}
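
/*
 * get_integral_image() fills a summed-area table with the standard recurrence
 *   I(x, y) = in(x, y) + I(x - 1, y) + I(x, y - 1) - I(x - 1, y - 1),
 * so the sum of any rectangular region can afterwards be obtained in O(1)
 * from just its four corner entries (see get_sum_disparities() below).
 */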

uint32_t get_sum_disparities(uint16_t min_x, uint16_t min_y, uint16_t max_x, uint16_t max_y, uint32_t *integral_image,
                             uint32_t image_width, uint32_t image_height)
{
  uint32_t sum;
  // If variables are not unsigned, then check for negative inputs
  // if (min_x + min_y * image_width < 0) { return 0; }
  if (max_x + max_y * image_width >= image_width * image_height) { return 0; }
  sum = integral_image[min_x + min_y * image_width] + integral_image[max_x + max_y * image_width] -
        integral_image[max_x + min_y * image_width] - integral_image[min_x + max_y * image_width];
  return sum;
}
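
/*
 * The four-corner difference above is the usual integral-image box sum:
 * adding the top-left and bottom-right entries and subtracting the top-right
 * and bottom-left ones yields the sum of pixel values between (min_x, min_y)
 * and (max_x, max_y) in constant time, independent of the box size.
 */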

uint32_t get_avg_disparity(uint16_t min_x, uint16_t min_y, uint16_t max_x, uint16_t max_y, uint32_t *integral_image,
                           uint32_t image_width, uint32_t image_height __attribute__((unused)))
{
  uint16_t w, h;
  uint32_t sum, avg, n_pix;

  // width and height of the window
  w = max_x - min_x + 1;
  h = max_y - min_y + 1;
  n_pix = w * h;
  // sum over the area:
  sum = integral_image[min_x + min_y * image_width] + integral_image[max_x + max_y * image_width] -
        integral_image[max_x + min_y * image_width] - integral_image[min_x + max_y * image_width];
  // take the average, scaled by RES:
  avg = (sum * RES) / n_pix;
  return avg;
}


uint16_t get_window_response(uint16_t x, uint16_t y, uint16_t feature_size, uint16_t border, uint32_t *integral_image,
                             uint16_t image_width, uint16_t image_height, uint16_t px_inner, uint16_t px_border, uint8_t MODE)
{
  uint32_t whole_area, inner_area, resp;

  whole_area = get_sum_disparities(x, y, x + feature_size, y + feature_size, integral_image, image_width, image_height);

  inner_area = get_sum_disparities(x + border, y + border, x + feature_size - border, y + feature_size - border,
                                   integral_image, image_width, image_height);

  if (MODE == MODE_DARK) {
    if (whole_area - inner_area > 0) {
      resp = (inner_area * RES * px_border) / ((whole_area - inner_area) * px_inner);
    } else {
      resp = RES;
    }
  } else { //if(MODE == MODE_BRIGHT)
    if (inner_area > 0 && (inner_area / px_inner) > 0) {
      resp = (RES * (whole_area - inner_area) / px_border) / (inner_area / px_inner);
      //printf("%u: %u %u %u %u\n",resp,(RES*RES*(whole_area - inner_area)/px_border), (inner_area/px_inner), px_inner, px_border);
    } else {
      resp = RES;
    }
  }

  return resp;
}
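
/*
 * get_window_response() compares the mean value of the border frame with the
 * mean value of the inner square, scaled by RES: in MODE_DARK the ratio drops
 * below RES when the inner region is darker than its border, in MODE_BRIGHT
 * when it is brighter. Values well below RES therefore mark window-like spots.
 */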

uint16_t get_border_response(uint16_t x, uint16_t y, uint16_t feature_size, uint16_t window_size, uint16_t border,
                             uint32_t *integral_image, uint16_t image_width, uint16_t image_height, uint16_t px_inner, uint16_t px_outer)
{
  uint32_t inner_area, avg_inner, left_area, right_area, up_area, down_area, darkest, avg_dark, resp;
  // inner area
  inner_area = get_sum_disparities(x + border, y + border, x + feature_size - border, y + feature_size - border,
                                   integral_image, image_width, image_height);
  avg_inner = (RES * inner_area) / px_inner;
  // outer areas:
  left_area = get_sum_disparities(x, y + border, x + border, y + border + window_size, integral_image, image_width,
                                  image_height);
  right_area = get_sum_disparities(x + border + window_size, y + border, x + 2 * border + window_size,
                                   y + border + window_size, integral_image, image_width, image_height);
  up_area = get_sum_disparities(x + border, y, x + border + window_size, y + border, integral_image, image_width,
                                image_height);
  down_area = get_sum_disparities(x + border, y + border + window_size, x + border + window_size,
                                  y + 2 * border + window_size, integral_image, image_width, image_height);
  // darkest outer area:
  darkest = (left_area < right_area) ? left_area : right_area;
  darkest = (darkest < up_area) ? darkest : up_area;
  darkest = (darkest < down_area) ? darkest : down_area;
  avg_dark = RES * darkest / px_outer;
  if (avg_dark < avg_inner) {
    resp = RES;
  } else {
    if (avg_dark == 0) {
      resp = RES;
    } else {
      resp = RES * avg_inner / avg_dark;
    }
  }

  return resp;
}
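
/*
 * get_border_response() repeats the contrast test per border strip: it takes
 * the darkest of the four strips around the window and relates the inner
 * average to it. Only when the inner region is darker than every individual
 * strip does the response stay below RES, which rejects candidates where one
 * border strip is itself as dark as the interior.
 */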

void filter_bad_pixels(uint8_t *in, uint32_t image_width, uint32_t image_height)
{
  uint16_t x, y;
  for (x = 0; x < image_width; x++) {
    for (y = 0; y < image_height; y++) {
      if (in[x + y * image_width] < 4) {
        in[x + y * image_width] = 6;
      }
    }
  }
}


void transform_illuminance_image(uint8_t *in, uint8_t *out, uint32_t image_width, uint32_t image_height, uint8_t n_bits,
                                 uint8_t bright_win)
{
  uint16_t x, y;
  for (x = 0; x < image_width; x++) {
    for (y = 0; y < image_height; y++) {
      // we put the right image entirely in the left image instead of in the even rows:
      if (!bright_win) {
        out[x + y * image_width] = in[2 * (x + y * image_width)] >> n_bits;
      } else {
        out[x + y * image_width] = (255 - in[2 * (x + y * image_width)]) >> n_bits;
      }
    }
  }
}
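
As a rough illustration of how the core detector can be driven outside of the Paparazzi image pipeline, the sketch below builds a synthetic grayscale frame containing one bright square and passes it straight to detect_window_sizes(). It is a minimal sketch, assuming detect_window.h (for the prototypes and MODE_BRIGHT) and the module's sources are available to the build and ignoring the module's other link-time dependencies; main(), TEST_W/TEST_H and the placement of the bright square are illustrative test values, not anything prescribed by the module.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "detect_window.h"

#define TEST_W 320
#define TEST_H 240

/* Static buffers: a full-frame integral image is too large for the stack. */
static uint8_t test_img[TEST_W * TEST_H];
static uint32_t test_integral[TEST_W * TEST_H];

int main(void)
{
  uint16_t coordinate[2] = {0, 0};
  uint16_t response;
  uint16_t x, y;

  /* Dark background with one bright 160x160 square, roughly centered. */
  memset(test_img, 20, sizeof(test_img));
  for (y = 40; y < 200; y++) {
    for (x = 80; x < 240; x++) {
      test_img[x + y * TEST_W] = 220;
    }
  }

  /* Look for a bright window; lower responses indicate a better match. */
  response = detect_window_sizes(test_img, TEST_W, TEST_H, coordinate,
                                 test_integral, MODE_BRIGHT);
  printf("best location: (%d, %d), response = %d\n",
         coordinate[0], coordinate[1], response);
  return 0;
}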