Paparazzi UAS  v6.0_unstable-41-g5cfda00-dirty
Paparazzi is a free software Unmanned Aircraft System.
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
opticflow_calculator.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2014 Hann Woei Ho
3  * 2015 Freek van Tienen <freek.v.tienen@gmail.com>
4  * 2016 Kimberly McGuire <k.n.mcguire@tudelft.nl>
5  *
6  * This file is part of Paparazzi.
7  *
8  * Paparazzi is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * Paparazzi is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with Paparazzi; see the file COPYING. If not, see
20  * <http://www.gnu.org/licenses/>.
21  */
22 
29 #include "std.h"
30 
31 #include <stdio.h>
32 #include <string.h>
33 #include <stdlib.h>
34 
35 // Own Header
36 #include "opticflow_calculator.h"
37 
38 // Computer Vision
39 #include "lib/vision/image.h"
41 #include "lib/vision/fast_rosten.h"
42 #include "lib/vision/act_fast.h"
43 #include "lib/vision/edge_flow.h"
45 #include "size_divergence.h"
46 #include "linear_flow_fit.h"
47 #include "modules/sonar/agl_dist.h"
48 
49 // to get the definition of front_camera / bottom_camera
50 #include BOARD_CONFIG
51 
52 // whether to show the flow and corners:
53 #define OPTICFLOW_SHOW_CORNERS 0
54 
55 #define EXHAUSTIVE_FAST 0
56 #define ACT_FAST 1
57 // TODO: these are now adapted, but perhaps later could be a setting:
58 uint16_t n_time_steps[2] = {10, 10};
59 uint16_t n_agents[2] = {25, 25};
60 
61 // What methods are run to determine divergence, lateral flow, etc.
62 // SIZE_DIV looks at line sizes and only calculates divergence
63 #define SIZE_DIV 1
64 // LINEAR_FIT makes a linear optical flow field fit and extracts a lot of information:
65 // relative velocities in x, y, z (divergence / time to contact), the slope of the surface, and the surface roughness.
66 #define LINEAR_FIT 1
67 
68 #ifndef OPTICFLOW_CORNER_METHOD
69 #define OPTICFLOW_CORNER_METHOD ACT_FAST
70 #endif
71 
72 #ifndef OPTICFLOW_CORNER_METHOD_CAMERA2
73 #define OPTICFLOW_CORNER_METHOD_CAMERA2 ACT_FAST
74 #endif
75 PRINT_CONFIG_VAR(OPTICFLOW_CORNER_METHOD)
76 PRINT_CONFIG_VAR(OPTICFLOW_CORNER_METHOD_CAMERA2)
77 
78 /* Set the default values */
79 #ifndef OPTICFLOW_MAX_TRACK_CORNERS
80 #define OPTICFLOW_MAX_TRACK_CORNERS 25
81 #endif
82 
83 #ifndef OPTICFLOW_MAX_TRACK_CORNERS_CAMERA2
84 #define OPTICFLOW_MAX_TRACK_CORNERS_CAMERA2 25
85 #endif
86 PRINT_CONFIG_VAR(OPTICFLOW_MAX_TRACK_CORNERS)
88 
89 #ifndef OPTICFLOW_WINDOW_SIZE
90 #define OPTICFLOW_WINDOW_SIZE 10
91 #endif
92 
93 #ifndef OPTICFLOW_WINDOW_SIZE_CAMERA2
94 #define OPTICFLOW_WINDOW_SIZE_CAMERA2 10
95 #endif
96 PRINT_CONFIG_VAR(OPTICFLOW_WINDOW_SIZE)
97 PRINT_CONFIG_VAR(OPTICFLOW_WINDOW_SIZE_CAMERA2)
98 
99 #ifndef OPTICFLOW_SEARCH_DISTANCE
100 #define OPTICFLOW_SEARCH_DISTANCE 20
101 #endif
102 
103 #ifndef OPTICFLOW_SEARCH_DISTANCE_CAMERA2
104 #define OPTICFLOW_SEARCH_DISTANCE_CAMERA2 20
105 #endif
106 PRINT_CONFIG_VAR(OPTICFLOW_SEARCH_DISTANCE)
107 PRINT_CONFIG_VAR(OPTICFLOW_SEARCH_DISTANCE_CAMERA2)
108 
109 #ifndef OPTICFLOW_SUBPIXEL_FACTOR
110 #define OPTICFLOW_SUBPIXEL_FACTOR 10
111 #endif
112 
113 #ifndef OPTICFLOW_SUBPIXEL_FACTOR_CAMERA2
114 #define OPTICFLOW_SUBPIXEL_FACTOR_CAMERA2 10
115 #endif
116 PRINT_CONFIG_VAR(OPTICFLOW_SUBPIXEL_FACTOR)
117 PRINT_CONFIG_VAR(OPTICFLOW_SUBPIXEL_FACTOR_CAMERA2)
118 
119 #ifndef OPTICFLOW_RESOLUTION_FACTOR
120 #define OPTICFLOW_RESOLUTION_FACTOR 100
121 #endif
122 
123 #ifndef OPTICFLOW_RESOLUTION_FACTOR_CAMERA2
124 #define OPTICFLOW_RESOLUTION_FACTOR_CAMERA2 100
125 #endif
126 PRINT_CONFIG_VAR(OPTICFLOW_RESOLUTION_FACTOR)
127 PRINT_CONFIG_VAR(OPTICFLOW_RESOLUTION_FACTOR_CAMERA2)
128 
129 #ifndef OPTICFLOW_MAX_ITERATIONS
130 #define OPTICFLOW_MAX_ITERATIONS 10
131 #endif
132 
133 #ifndef OPTICFLOW_MAX_ITERATIONS_CAMERA2
134 #define OPTICFLOW_MAX_ITERATIONS_CAMERA2 10
135 #endif
136 PRINT_CONFIG_VAR(OPTICFLOW_MAX_ITERATIONS)
137 PRINT_CONFIG_VAR(OPTICFLOW_MAX_ITERATIONS_CAMERA2)
138 
139 #ifndef OPTICFLOW_THRESHOLD_VEC
140 #define OPTICFLOW_THRESHOLD_VEC 2
141 #endif
142 
143 #ifndef OPTICFLOW_THRESHOLD_VEC_CAMERA2
144 #define OPTICFLOW_THRESHOLD_VEC_CAMERA2 2
145 #endif
146 PRINT_CONFIG_VAR(OPTICFLOW_THRESHOLD_VEC)
147 PRINT_CONFIG_VAR(OPTICFLOW_THRESHOLD_VEC_CAMERA2)
148 
149 #ifndef OPTICFLOW_PYRAMID_LEVEL
150 #define OPTICFLOW_PYRAMID_LEVEL 2
151 #endif
152 
153 #ifndef OPTICFLOW_PYRAMID_LEVEL_CAMERA2
154 #define OPTICFLOW_PYRAMID_LEVEL_CAMERA2 2
155 #endif
156 PRINT_CONFIG_VAR(OPTICFLOW_PYRAMID_LEVEL)
157 PRINT_CONFIG_VAR(OPTICFLOW_PYRAMID_LEVEL_CAMERA2)
158 
159 #ifndef OPTICFLOW_FAST9_ADAPTIVE
160 #define OPTICFLOW_FAST9_ADAPTIVE TRUE
161 #endif
162 
163 #ifndef OPTICFLOW_FAST9_ADAPTIVE_CAMERA2
164 #define OPTICFLOW_FAST9_ADAPTIVE_CAMERA2 TRUE
165 #endif
166 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_ADAPTIVE)
167 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_ADAPTIVE_CAMERA2)
168 
169 #ifndef OPTICFLOW_FAST9_THRESHOLD
170 #define OPTICFLOW_FAST9_THRESHOLD 20
171 #endif
172 
173 #ifndef OPTICFLOW_FAST9_THRESHOLD_CAMERA2
174 #define OPTICFLOW_FAST9_THRESHOLD_CAMERA2 20
175 #endif
176 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_THRESHOLD)
177 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_THRESHOLD_CAMERA2)
178 
179 #ifndef OPTICFLOW_FAST9_MIN_DISTANCE
180 #define OPTICFLOW_FAST9_MIN_DISTANCE 10
181 #endif
182 
183 #ifndef OPTICFLOW_FAST9_MIN_DISTANCE_CAMERA2
184 #define OPTICFLOW_FAST9_MIN_DISTANCE_CAMERA2 10
185 #endif
186 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_MIN_DISTANCE)
187 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_MIN_DISTANCE_CAMERA2)
188 
189 #ifndef OPTICFLOW_FAST9_PADDING
190 #define OPTICFLOW_FAST9_PADDING 20
191 #endif
192 
193 #ifndef OPTICFLOW_FAST9_PADDING_CAMERA2
194 #define OPTICFLOW_FAST9_PADDING_CAMERA2 20
195 #endif
196 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_PADDING)
197 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_PADDING_CAMERA2)
198 
199 // thresholds FAST9 that are currently not set from the GCS:
200 #define FAST9_LOW_THRESHOLD 5
201 #define FAST9_HIGH_THRESHOLD 60
202 
203 
204 #ifndef OPTICFLOW_METHOD
205 #define OPTICFLOW_METHOD 0
206 #endif
207 
208 #ifndef OPTICFLOW_METHOD_CAMERA2
209 #define OPTICFLOW_METHOD_CAMERA2 0
210 #endif
211 PRINT_CONFIG_VAR(OPTICFLOW_METHOD)
212 PRINT_CONFIG_VAR(OPTICFLOW_METHOD_CAMERA2)
213 
214 #if OPTICFLOW_METHOD > 1
215 #error WARNING: Both Lukas Kanade and EdgeFlow are NOT selected camera1
216 #endif
217 
218 #if OPTICFLOW_METHOD_CAMERA2 > 1
219 #error WARNING: Both Lukas Kanade and EdgeFlow are NOT selected camera2
220 #endif
221 
222 #ifndef OPTICFLOW_DEROTATION
223 #define OPTICFLOW_DEROTATION TRUE
224 #endif
225 
226 #ifndef OPTICFLOW_DEROTATION_CAMERA2
227 #define OPTICFLOW_DEROTATION_CAMERA2 TRUE
228 #endif
229 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION)
230 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION_CAMERA2)
231 
232 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X
233 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X 1.0
234 #endif
235 
236 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X_CAMERA2
237 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X_CAMERA2 1.0
238 #endif
241 
242 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y
243 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y 1.0
244 #endif
245 
246 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y_CAMERA2
247 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y_CAMERA2 1.0
248 #endif
251 
252 #ifndef OPTICFLOW_MEDIAN_FILTER
253 #define OPTICFLOW_MEDIAN_FILTER FALSE
254 #endif
255 
256 #ifndef OPTICFLOW_MEDIAN_FILTER_CAMERA2
257 #define OPTICFLOW_MEDIAN_FILTER_CAMERA2 FALSE
258 #endif
259 PRINT_CONFIG_VAR(OPTICFLOW_MEDIAN_FILTER)
260 PRINT_CONFIG_VAR(OPTICFLOW_MEDIAN_FILTER_CAMERA2)
261 
262 #ifndef OPTICFLOW_FEATURE_MANAGEMENT
263 #define OPTICFLOW_FEATURE_MANAGEMENT 0
264 #endif
265 
266 #ifndef OPTICFLOW_FEATURE_MANAGEMENT_CAMERA2
267 #define OPTICFLOW_FEATURE_MANAGEMENT_CAMERA2 0
268 #endif
269 PRINT_CONFIG_VAR(OPTICFLOW_FEATURE_MANAGEMENT)
270 PRINT_CONFIG_VAR(OPTICFLOW_FEATURE_MANAGEMENT_CAMERA2)
271 
272 #ifndef OPTICFLOW_FAST9_REGION_DETECT
273 #define OPTICFLOW_FAST9_REGION_DETECT 1
274 #endif
275 
276 #ifndef OPTICFLOW_FAST9_REGION_DETECT_CAMERA2
277 #define OPTICFLOW_FAST9_REGION_DETECT_CAMERA2 1
278 #endif
279 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_REGION_DETECT)
281 
282 #ifndef OPTICFLOW_FAST9_NUM_REGIONS
283 #define OPTICFLOW_FAST9_NUM_REGIONS 9
284 #endif
285 
286 #ifndef OPTICFLOW_FAST9_NUM_REGIONS_CAMERA2
287 #define OPTICFLOW_FAST9_NUM_REGIONS_CAMERA2 9
288 #endif
289 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_NUM_REGIONS)
290 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_NUM_REGIONS_CAMERA2)
291 
292 #ifndef OPTICFLOW_ACTFAST_LONG_STEP
293 #define OPTICFLOW_ACTFAST_LONG_STEP 10
294 #endif
295 
296 #ifndef OPTICFLOW_ACTFAST_LONG_STEP_CAMERA2
297 #define OPTICFLOW_ACTFAST_LONG_STEP_CAMERA2 10
298 #endif
299 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_LONG_STEP)
300 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_LONG_STEP_CAMERA2)
301 
302 #ifndef OPTICFLOW_ACTFAST_SHORT_STEP
303 #define OPTICFLOW_ACTFAST_SHORT_STEP 2
304 #endif
305 
306 #ifndef OPTICFLOW_ACTFAST_SHORT_STEP_CAMERA2
307 #define OPTICFLOW_ACTFAST_SHORT_STEP_CAMERA2 2
308 #endif
309 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_SHORT_STEP)
310 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_SHORT_STEP_CAMERA2)
311 
312 #ifndef OPTICFLOW_ACTFAST_GRADIENT_METHOD
313 #define OPTICFLOW_ACTFAST_GRADIENT_METHOD 1
314 #endif
315 
316 #ifndef OPTICFLOW_ACTFAST_GRADIENT_METHOD_CAMERA2
317 #define OPTICFLOW_ACTFAST_GRADIENT_METHOD_CAMERA2 1
318 #endif
319 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_GRADIENT_METHOD)
321 
322 #ifndef OPTICFLOW_ACTFAST_MIN_GRADIENT
323 #define OPTICFLOW_ACTFAST_MIN_GRADIENT 10
324 #endif
325 
326 #ifndef OPTICFLOW_ACTFAST_MIN_GRADIENT_CAMERA2
327 #define OPTICFLOW_ACTFAST_MIN_GRADIENT_CAMERA2 10
328 #endif
329 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_MIN_GRADIENT)
331 
332 // Defaults for ARdrone
333 #ifndef OPTICFLOW_BODY_TO_CAM_PHI
334 #define OPTICFLOW_BODY_TO_CAM_PHI 0
335 #endif
336 #ifndef OPTICFLOW_BODY_TO_CAM_THETA
337 #define OPTICFLOW_BODY_TO_CAM_THETA 0
338 #endif
339 #ifndef OPTICFLOW_BODY_TO_CAM_PSI
340 #define OPTICFLOW_BODY_TO_CAM_PSI -M_PI_2
341 #endif
342 
343 #ifndef OPTICFLOW_BODY_TO_CAM_PHI_CAMERA2
344 #define OPTICFLOW_BODY_TO_CAM_PHI_CAMERA2 0
345 #endif
346 #ifndef OPTICFLOW_BODY_TO_CAM_THETA_CAMERA2
347 #define OPTICFLOW_BODY_TO_CAM_THETA_CAMERA2 0
348 #endif
349 #ifndef OPTICFLOW_BODY_TO_CAM_PSI_CAMERA2
350 #define OPTICFLOW_BODY_TO_CAM_PSI_CAMERA2 -M_PI_2
351 #endif
352 
353 // Tracking back flow to make the accepted flow vectors more robust:
354 // Default is false, as it does take extra processing time
355 #ifndef OPTICFLOW_TRACK_BACK
356 #define OPTICFLOW_TRACK_BACK FALSE
357 #endif
358 
359 #ifndef OPTICFLOW_TRACK_BACK_CAMERA2
360 #define OPTICFLOW_TRACK_BACK_CAMERA2 FALSE
361 #endif
362 PRINT_CONFIG_VAR(OPTICFLOW_TRACK_BACK)
363 PRINT_CONFIG_VAR(OPTICFLOW_TRACK_BACK_CAMERA2)
364 
365 // Whether to draw the flow on the image:
366 // False by default, since it changes the image and costs time.
367 #ifndef OPTICFLOW_SHOW_FLOW
368 #define OPTICFLOW_SHOW_FLOW FALSE
369 #endif
370 
371 #ifndef OPTICFLOW_SHOW_FLOW_CAMERA2
372 #define OPTICFLOW_SHOW_FLOW_CAMERA2 FALSE
373 #endif
374 PRINT_CONFIG_VAR(OPTICFLOW_SHOW_FLOW)
375 PRINT_CONFIG_VAR(OPTICFLOW_SHOW_FLOW_CAMERA2)
376 
377 
378 //Include median filter
379 #include "filters/median_filter.h"
382 
383 /* Functions only used here */
384 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime);
385 static int cmp_flow(const void *a, const void *b);
386 static int cmp_array(const void *a, const void *b);
387 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow,
388  struct opticflow_result_t *result);
389 
390 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
391  float theta_diff, float psi_diff, struct opticflow_t *opticflow);
/*
 * opticflow_calc_init — initialize the optic-flow calculator settings from the
 * OPTICFLOW_* compile-time defaults, for camera 1 (index 0) and, when
 * OPTICFLOW_CAMERA2 is defined, for camera 2 (index 1).
 *
 * NOTE(review): this excerpt is a documentation scrape. The function signature
 * (original line 396, presumably `void opticflow_calc_init(struct opticflow_t
 * opticflow[])` -- TODO confirm) and many interior assignment lines (e.g.
 * original lines 401, 403-404, 407-408, 412-413, 417-423, 429-432, 437,
 * 442-466, 470-474, 479-480) are missing from this view; they presumably set
 * the remaining per-camera fields and the euler_cam1/euler_cam2 angles.
 */
397 {
398  /* Set the default values */
399  opticflow[0].method = OPTICFLOW_METHOD; //0 = LK_fast9, 1 = Edgeflow
400  opticflow[0].window_size = OPTICFLOW_WINDOW_SIZE;
402  opticflow[0].derotation = OPTICFLOW_DEROTATION; //0 = OFF, 1 = ON
405  opticflow[0].track_back = OPTICFLOW_TRACK_BACK;
406  opticflow[0].show_flow = OPTICFLOW_SHOW_FLOW;
// A subpixel factor of 0 would divide by zero downstream; fall back to 10.
409  if (opticflow[0].subpixel_factor == 0) {
410  opticflow[0].subpixel_factor = 10;
411  }
414  opticflow[0].threshold_vec = OPTICFLOW_THRESHOLD_VEC;
415  opticflow[0].pyramid_level = OPTICFLOW_PYRAMID_LEVEL;
416  opticflow[0].median_filter = OPTICFLOW_MEDIAN_FILTER;
420 
424  opticflow[0].fast9_padding = OPTICFLOW_FAST9_PADDING;
425  opticflow[0].fast9_rsize = FAST9_MAX_CORNERS;
// Corner buffer is heap-allocated once at init; note the calloc result is not checked.
426  opticflow[0].fast9_ret_corners = calloc(opticflow[0].fast9_rsize, sizeof(struct point_t));
427 
428  opticflow[0].corner_method = OPTICFLOW_CORNER_METHOD;
433 
434  opticflow[0].camera = &OPTICFLOW_CAMERA;
435  opticflow[0].id = 0;
436 
// euler_cam1 is defined on a line missing from this excerpt (original line 437).
438  float_rmat_of_eulers(&body_to_cam[0], &euler_cam1);
439 
440 #ifdef OPTICFLOW_CAMERA2
441  opticflow[1].method = OPTICFLOW_METHOD_CAMERA2; //0 = LK_fast9, 1 = Edgeflow
444  opticflow[1].derotation = OPTICFLOW_DEROTATION_CAMERA2; //0 = OFF, 1 = ON
448  opticflow[1].show_flow = OPTICFLOW_SHOW_FLOW_CAMERA2;
451  if (opticflow[1].subpixel_factor == 0) {
452  opticflow[1].subpixel_factor = 10;
453  }
462 
467  opticflow[1].fast9_rsize = FAST9_MAX_CORNERS;
// NOTE(review): sizes camera-2's corner buffer with opticflow[0].fast9_rsize --
// almost certainly a copy-paste slip for opticflow[1].fast9_rsize; harmless only
// because both are set to FAST9_MAX_CORNERS just above. Worth fixing upstream.
468  opticflow[1].fast9_ret_corners = calloc(opticflow[0].fast9_rsize, sizeof(struct point_t));
469 
475 
476  opticflow[1].camera = &OPTICFLOW_CAMERA2;
477  opticflow[1].id = 1;
478 
481  float_rmat_of_eulers(&body_to_cam[1], &euler_cam2);
482 #endif
483 }
/*
 * calc_fast9_lucas_kanade — one full optic-flow iteration on the new frame:
 * (re)detect corners with exhaustive FAST9 or ACT-FAST, track them with a
 * pyramidal Lucas-Kanade tracker (optionally verified by tracking back),
 * estimate size divergence and a RANSAC linear flow fit, derotate the flow
 * using the inter-frame attitude change, and convert the median flow into
 * camera-frame velocities scaled by the sonar AGL distance.
 * Returns false when no usable result could be produced for this frame.
 *
 * NOTE(review): documentation scrape. The first line of the signature
 * (original line 492, presumably `bool calc_fast9_lucas_kanade(struct
 * opticflow_t *opticflow, struct image_t *img,` -- TODO confirm) and interior
 * lines 504, 741, 743, 789 are missing from this view; they are flagged below
 * where they fall.
 */
493  struct opticflow_result_t *result)
494 {
495  if (opticflow->just_switched_method) {
496  // Create the image buffers
497  image_create(&opticflow->img_gray, img->w, img->h, IMAGE_GRAYSCALE);
498  image_create(&opticflow->prev_img_gray, img->w, img->h, IMAGE_GRAYSCALE);
499 
500  // Set the previous values
501  opticflow->got_first_img = false;
502 
503  // Init median filters with zeros
// (original line 504 -- the median-filter zero-initialization call -- is missing from this excerpt)
505  }
506 
507  // Convert image to grayscale
508  image_to_grayscale(img, &opticflow->img_gray);
509 
// First frame only seeds prev_img_gray; no flow can be computed yet.
510  if (!opticflow->got_first_img) {
511  image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
512  opticflow->got_first_img = true;
513  return false;
514  }
515 
516  // variables for linear flow fit:
517  float error_threshold;
518  int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
519  struct linear_flow_fit_info fit_info;
520 
521  // Update FPS for information
// dt is presumably in milliseconds given fps = 1000/dt -- TODO confirm timeval_diff units.
522  float dt = timeval_diff(&(opticflow->prev_img_gray.ts), &(img->ts));
523  if (dt > 1e-5) {
524  result->fps = 1000.f / dt;
525  } else {
526  return false;
527  }
528 
529  // *************************************************************************************
530  // Corner detection
531  // *************************************************************************************
532 
533  // if feature_management is selected and tracked corners drop below a threshold, redetect
534  if ((opticflow->feature_management) && (result->corner_cnt < opticflow->max_track_corners / 2)) {
535  manage_flow_features(img, opticflow, result);
536  } else if (!opticflow->feature_management) {
537  // needs to be set to 0 because result is now static
538  result->corner_cnt = 0;
539  if (opticflow->corner_method == EXHAUSTIVE_FAST) {
540  // FAST corner detection
541  // TODO: There is something wrong with fast9_detect destabilizing FPS. This problem is reduced with putting min_distance
542  // to 0 (see defines), however a more permanent solution should be considered
543  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
544  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt, &opticflow->fast9_rsize,
545  &opticflow->fast9_ret_corners, NULL);
546  } else if (opticflow->corner_method == ACT_FAST) {
547  // ACT-FAST corner detection:
548  act_fast(&opticflow->prev_img_gray, opticflow->fast9_threshold, &result->corner_cnt,
549  &opticflow->fast9_ret_corners, n_agents[opticflow->id], n_time_steps[opticflow->id],
550  opticflow->actfast_long_step, opticflow->actfast_short_step, opticflow->actfast_min_gradient,
551  opticflow->actfast_gradient_method, opticflow->id);
552  }
553 
554  // Adaptive threshold
555  if (opticflow->fast9_adaptive) {
556 
557  // This works well for exhaustive FAST, but drives the threshold to the minimum for ACT-FAST:
558  // Decrease and increase the threshold based on previous values
559  if (result->corner_cnt < opticflow->max_track_corners / 2) {
560  // make detections easier:
561  if (opticflow->fast9_threshold > FAST9_LOW_THRESHOLD) {
562  opticflow->fast9_threshold--;
563  }
564 
// For ACT-FAST, also add agents / exploration steps when too few corners were found.
565  if (opticflow->corner_method == ACT_FAST && n_agents[opticflow->id] < opticflow->fast9_rsize) {
566  n_time_steps[opticflow->id]++;
567  n_agents[opticflow->id]++;
568  }
569 
570  } else if (result->corner_cnt > opticflow->max_track_corners * 2) {
571 
572  if (opticflow->fast9_threshold < FAST9_HIGH_THRESHOLD) {
573  opticflow->fast9_threshold++;
574  }
575 
576  if (opticflow->corner_method == ACT_FAST && n_time_steps[opticflow->id] > 5 && n_agents[opticflow->id] > 10) {
577  n_time_steps[opticflow->id]--;
578  n_agents[opticflow->id]--;
579  }
580  }
581  }
582  }
583 
584 #if OPTICFLOW_SHOW_CORNERS
585  image_show_points(img, opticflow->fast9_ret_corners, result->corner_cnt);
586 #endif
587 
588  // Check if we found some corners to track
589  if (result->corner_cnt < 1) {
590  // Clear the result otherwise the previous values will be returned for this frame too
591  VECT3_ASSIGN(result->vel_cam, 0, 0, 0);
592  VECT3_ASSIGN(result->vel_body, 0, 0, 0);
593  result->div_size = 0;
594  result->divergence = 0;
595  result->noise_measurement = 5.0;
596 
597  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
598  return false;
599  }
600 
601  // *************************************************************************************
602  // Corner Tracking
603  // *************************************************************************************
604 
605  // Execute a Lucas Kanade optical flow
606  result->tracked_cnt = result->corner_cnt;
607  uint8_t keep_bad_points = 0;
// opticFlowLK allocates `vectors` on the heap; it is freed on every exit path below.
608  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, opticflow->fast9_ret_corners,
609  &result->tracked_cnt,
610  opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
611  opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level, keep_bad_points);
612 
613 
614  if (opticflow->track_back) {
615  // TODO: Watch out!
616  // We track the flow back and give badly back-tracked vectors a high error,
617  // but we do not yet remove these vectors, nor use the errors in any other function than showing the flow.
618 
619  // initialize corners at the tracked positions:
620  for (int i = 0; i < result->tracked_cnt; i++) {
621  opticflow->fast9_ret_corners[i].x = (uint32_t)(vectors[i].pos.x + vectors[i].flow_x) / opticflow->subpixel_factor;
622  opticflow->fast9_ret_corners[i].y = (uint32_t)(vectors[i].pos.y + vectors[i].flow_y) / opticflow->subpixel_factor;
623  }
624 
625  // present the images in the opposite order:
626  keep_bad_points = 1;
627  uint16_t back_track_cnt = result->tracked_cnt;
628  struct flow_t *back_vectors = opticFlowLK(&opticflow->prev_img_gray, &opticflow->img_gray, opticflow->fast9_ret_corners,
629  &back_track_cnt,
630  opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
631  opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level, keep_bad_points);
632 
633  // printf("Tracked %d points back.\n", back_track_cnt);
634  int32_t back_x, back_y, diff_x, diff_y, dist_squared;
// Squared-pixel (subpixel-scaled) distance above which a back-tracked point is rejected.
635  int32_t back_track_threshold = 200;
636 
637  for (int i = 0; i < result->tracked_cnt; i++) {
638  if (back_vectors[i].error < LARGE_FLOW_ERROR) {
639  back_x = (int32_t)(back_vectors[i].pos.x + back_vectors[i].flow_x);
640  back_y = (int32_t)(back_vectors[i].pos.y + back_vectors[i].flow_y);
641  diff_x = back_x - vectors[i].pos.x;
642  diff_y = back_y - vectors[i].pos.y;
643  dist_squared = diff_x * diff_x + diff_y * diff_y;
644  // printf("Vector %d: x,y = %d, %d, back x, y = %d, %d, back tracking error %d\n", i, vectors[i].pos.x, vectors[i].pos.y, back_x, back_y, dist_squared);
645  if (dist_squared > back_track_threshold) {
646  vectors[i].error = LARGE_FLOW_ERROR;
647  }
648  } else {
649  vectors[i].error = LARGE_FLOW_ERROR;
650  }
651  }
652 
653  free(back_vectors);
654  }
655 
656  if (opticflow->show_flow) {
657  uint8_t color[4] = {0, 0, 0, 0};
658  uint8_t bad_color[4] = {0, 0, 0, 0};
659  image_show_flow_color(img, vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
660  }
661 
// Number of random line-pairs sampled by get_size_divergence.
662  static int n_samples = 100;
663  // Estimate size divergence:
664  if (SIZE_DIV) {
665  result->div_size = get_size_divergence(vectors, result->tracked_cnt, n_samples);// * result->fps;
666  } else {
667  result->div_size = 0.0f;
668  }
669 
670  if (LINEAR_FIT) {
671  // Linear flow fit (normally derotation should be performed before):
672  error_threshold = 10.0f;
673  n_iterations_RANSAC = 20;
674  n_samples_RANSAC = 5;
675  success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
676  n_samples_RANSAC, img->w, img->h, &fit_info);
677 
678  if (!success_fit) {
679  fit_info.divergence = 0.0f;
680  fit_info.surface_roughness = 0.0f;
681  }
682 
683  result->divergence = fit_info.divergence;
684  result->surface_roughness = fit_info.surface_roughness;
685  } else {
686  result->divergence = 0.0f;
687  result->surface_roughness = 0.0f;
688  }
689 
690  // Get the median flow
// qsort with count 0 is well-defined; the empty case is handled right below.
691  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
692  if (result->tracked_cnt == 0) {
693  // We got no flow
694  result->flow_x = 0;
695  result->flow_y = 0;
696 
697  free(vectors);
698  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
699  return false;
700  } else if (result->tracked_cnt % 2) {
701  // Take the median point
702  result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
703  result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
704  } else {
705  // Take the average of the 2 median points
706  result->flow_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
707  result->flow_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
708  }
709 
710  // TODO scale flow to rad/s here
711 
712  // ***************
713  // Flow Derotation
714  // ***************
715 
716  float diff_flow_x = 0.f;
717  float diff_flow_y = 0.f;
718 
// Derotation only makes sense with enough tracked vectors; 5 is the minimum used here.
719  if (opticflow->derotation && result->tracked_cnt > 5) {
720 
// 1 degree of attitude change between frames is considered too fast to derotate reliably.
721  float rotation_threshold = M_PI / 180.0f;
722  if (fabs(opticflow->img_gray.eulers.phi - opticflow->prev_img_gray.eulers.phi) > rotation_threshold
723  || fabs(opticflow->img_gray.eulers.theta - opticflow->prev_img_gray.eulers.theta) > rotation_threshold) {
724 
725  // do not apply the derotation if the rotation rates are too high:
726  result->flow_der_x = 0.0f;
727  result->flow_der_y = 0.0f;
728 
729  } else {
730 
731  // determine the roll, pitch, yaw differencces between the images.
732  float phi_diff = opticflow->img_gray.eulers.phi - opticflow->prev_img_gray.eulers.phi;
733  float theta_diff = opticflow->img_gray.eulers.theta - opticflow->prev_img_gray.eulers.theta;
734  float psi_diff = opticflow->img_gray.eulers.psi - opticflow->prev_img_gray.eulers.psi;
735 
736  if (strcmp(opticflow->camera->dev_name, bottom_camera.dev_name) == 0) {
737  // bottom cam: just subtract a scaled version of the roll and pitch difference from the global flow vector:
738  diff_flow_x = phi_diff * opticflow->camera->camera_intrinsics.focal_x; // phi_diff works better than (cam_state->rates.p)
739  diff_flow_y = theta_diff * opticflow->camera->camera_intrinsics.focal_y;
// NOTE(review): original lines 741 and 743 (presumably the trailing
// `opticflow->derotation_correction_factor_x;` / `..._y;` multiplicands) are
// missing from this excerpt, so the two statements below are truncated here.
740  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor *
742  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor *
744  } else {
745  // frontal cam, predict individual flow vectors:
746  struct flow_t *predicted_flow_vectors = predict_flow_vectors(vectors, result->tracked_cnt, phi_diff, theta_diff,
747  psi_diff, opticflow);
748  if (opticflow->show_flow) {
749  uint8_t color[4] = {255, 255, 255, 255};
750  uint8_t bad_color[4] = {255, 255, 255, 255};
751  image_show_flow_color(img, predicted_flow_vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
752  }
753 
754  for (int i = 0; i < result->tracked_cnt; i++) {
755  // subtract the flow:
756  vectors[i].flow_x -= predicted_flow_vectors[i].flow_x;
757  vectors[i].flow_y -= predicted_flow_vectors[i].flow_y;
758  }
759 
760  // vectors have to be re-sorted after derotation:
761  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
762 
763  if (result->tracked_cnt % 2) {
764  // Take the median point
765  result->flow_der_x = vectors[result->tracked_cnt / 2].flow_x;
766  result->flow_der_y = vectors[result->tracked_cnt / 2].flow_y;
767  } else {
768  // Take the average of the 2 median points
769  result->flow_der_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
770  result->flow_der_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
771  }
772  }
773  }
774  }
775  result->camera_id = opticflow->id;
776 
777  // Velocity calculation
778  // Right now this formula is under assumption that the flow only exist in the center axis of the camera.
779  // TODO: Calculate the velocity more sophisticated, taking into account the drone's angle and the slope of the ground plane.
780  // TODO: This is actually only correct for the bottom camera:
781  result->vel_cam.x = (float)result->flow_der_x * result->fps * agl_dist_value_filtered /
782  (opticflow->subpixel_factor * opticflow->camera->camera_intrinsics.focal_x);
783  result->vel_cam.y = (float)result->flow_der_y * result->fps * agl_dist_value_filtered /
784  (opticflow->subpixel_factor * opticflow->camera->camera_intrinsics.focal_y);
785  result->vel_cam.z = result->divergence * result->fps * agl_dist_value_filtered;
786 
787  //Apply a median filter to the velocity if wanted
788  if (opticflow->median_filter == true) {
// (original line 789 -- the median-filter update call on vel_cam -- is missing from this excerpt)
790  }
791 
792  // Determine quality of noise measurement for state filter
793  //TODO develop a noise model based on groundtruth
794  //result->noise_measurement = 1 - (float)result->tracked_cnt / ((float)opticflow->max_track_corners * 1.25f);
795  result->noise_measurement = 0.25;
796 
797  // *************************************************************************************
798  // Next Loop Preparation
799  // *************************************************************************************
800  if (opticflow->feature_management) {
801  result->corner_cnt = result->tracked_cnt;
802  //get the new positions of the corners and the "residual" subpixel positions
803  for (uint16_t i = 0; i < result->tracked_cnt; i++) {
804  opticflow->fast9_ret_corners[i].x = (uint32_t)((vectors[i].pos.x + (float)vectors[i].flow_x) /
805  opticflow->subpixel_factor);
806  opticflow->fast9_ret_corners[i].y = (uint32_t)((vectors[i].pos.y + (float)vectors[i].flow_y) /
807  opticflow->subpixel_factor);
808  opticflow->fast9_ret_corners[i].x_sub = (uint16_t)((vectors[i].pos.x + vectors[i].flow_x) % opticflow->subpixel_factor);
809  opticflow->fast9_ret_corners[i].y_sub = (uint16_t)((vectors[i].pos.y + vectors[i].flow_y) % opticflow->subpixel_factor);
810  opticflow->fast9_ret_corners[i].count = vectors[i].pos.count;
811  }
812  }
813  free(vectors);
// Current gray image becomes the previous image for the next iteration.
814  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
815  return true;
816 }
817 
818 /*
819  * Predict flow vectors by means of the rotation rates:
820  */
821 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
822  float theta_diff, float psi_diff, struct opticflow_t *opticflow)
823 {
824 
825  // reserve memory for the predicted flow vectors:
826  struct flow_t *predicted_flow_vectors = malloc(sizeof(struct flow_t) * n_points);
827 
828  float K[9] = {opticflow->camera->camera_intrinsics.focal_x, 0.0f, opticflow->camera->camera_intrinsics.center_x,
829  0.0f, opticflow->camera->camera_intrinsics.focal_y, opticflow->camera->camera_intrinsics.center_y,
830  0.0f, 0.0f, 1.0f
831  };
832  // TODO: make an option to not do distortion / undistortion (Dhane_k = 1)
833  float k = opticflow->camera->camera_intrinsics.Dhane_k;
834 
835  float A, B, C; // as in Longuet-Higgins
836 
837  if (strcmp(opticflow->camera->dev_name, front_camera.dev_name) == 0) {
838  // specific for the x,y swapped Bebop 2 images:
839  A = -psi_diff;
840  B = theta_diff;
841  C = phi_diff;
842  } else {
843  A = theta_diff;
844  B = phi_diff;
845  C = psi_diff;
846  }
847 
848  float x_n, y_n;
849  float x_n_new, y_n_new, x_pix_new, y_pix_new;
850  float predicted_flow_x, predicted_flow_y;
851  for (uint16_t i = 0; i < n_points; i++) {
852  // the from-coordinate is always the same:
853  predicted_flow_vectors[i].pos.x = flow_vectors[i].pos.x;
854  predicted_flow_vectors[i].pos.y = flow_vectors[i].pos.y;
855 
856  bool success = distorted_pixels_to_normalized_coords((float)flow_vectors[i].pos.x / opticflow->subpixel_factor,
857  (float)flow_vectors[i].pos.y / opticflow->subpixel_factor, &x_n, &y_n, k, K);
858  if (success) {
859  // predict flow as in a linear pinhole camera model:
860  predicted_flow_x = A * x_n * y_n - B * x_n * x_n - B + C * y_n;
861  predicted_flow_y = -C * x_n + A + A * y_n * y_n - B * x_n * y_n;
862 
863  x_n_new = x_n + predicted_flow_x;
864  y_n_new = y_n + predicted_flow_y;
865 
866  success = normalized_coords_to_distorted_pixels(x_n_new, y_n_new, &x_pix_new, &y_pix_new, k, K);
867 
868  if (success) {
869  predicted_flow_vectors[i].flow_x = (int16_t)(x_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.x);
870  predicted_flow_vectors[i].flow_y = (int16_t)(y_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.y);
871  predicted_flow_vectors[i].error = 0;
872  } else {
873  predicted_flow_vectors[i].flow_x = 0;
874  predicted_flow_vectors[i].flow_y = 0;
875  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
876  }
877  } else {
878  predicted_flow_vectors[i].flow_x = 0;
879  predicted_flow_vectors[i].flow_y = 0;
880  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
881  }
882  }
883  return predicted_flow_vectors;
884 }
885 
886 
887 /* manage_flow_features - Update list of corners to be tracked by LK
888  * Remembers previous points and tries to find new points in less dense
889  * areas of the image first.
890  *
891  */
892 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow, struct opticflow_result_t *result)
893 {
894  // first check if corners have not moved too close together due to flow:
895  int16_t c1 = 0;
896  while (c1 < (int16_t)result->corner_cnt - 1) {
897  bool exists = false;
898  for (int16_t i = c1 + 1; i < result->corner_cnt; i++) {
899  if (abs((int16_t)opticflow->fast9_ret_corners[c1].x - (int16_t)opticflow->fast9_ret_corners[i].x) <
900  opticflow->fast9_min_distance / 2
901  && abs((int16_t)opticflow->fast9_ret_corners[c1].y - (int16_t)opticflow->fast9_ret_corners[i].y) <
902  opticflow->fast9_min_distance / 2) {
903  // if too close, replace the corner with the last one in the list:
904  opticflow->fast9_ret_corners[c1].x = opticflow->fast9_ret_corners[result->corner_cnt - 1].x;
905  opticflow->fast9_ret_corners[c1].y = opticflow->fast9_ret_corners[result->corner_cnt - 1].y;
906  opticflow->fast9_ret_corners[c1].count = opticflow->fast9_ret_corners[result->corner_cnt - 1].count;
907  opticflow->fast9_ret_corners[c1].x_sub = opticflow->fast9_ret_corners[result->corner_cnt - 1].x_sub;
908  opticflow->fast9_ret_corners[c1].y_sub = opticflow->fast9_ret_corners[result->corner_cnt - 1].y_sub;
909 
910  // decrease the number of corners:
911  result->corner_cnt--;
912  exists = true;
913  // no further checking required for the removed corner
914  break;
915  }
916  }
917  // if the corner has been replaced, the new corner in position c1 has to be checked again:
918  if (!exists) { c1++; }
919  }
920 
921  // no need for "per region" re-detection when there are no previous corners
922  if ((!opticflow->fast9_region_detect) || (result->corner_cnt == 0)) {
923  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
924  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
925  &opticflow->fast9_rsize,
926  &opticflow->fast9_ret_corners,
927  NULL);
928  } else {
929  // allocating memory and initializing the 2d array that holds the number of corners per region and its index (for the sorting)
930  uint16_t **region_count = calloc(opticflow->fast9_num_regions, sizeof(uint16_t *));
931  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
932  region_count[i] = calloc(2, sizeof(uint16_t));
933  region_count[i][0] = 0;
934  region_count[i][1] = i;
935  }
936  uint16_t root_regions = (uint16_t)sqrtf((float)opticflow->fast9_num_regions);
937  int region_index;
938  for (uint16_t i = 0; i < result->corner_cnt; i++) {
939  region_index = (opticflow->fast9_ret_corners[i].x * root_regions / img->w
940  + root_regions * (opticflow->fast9_ret_corners[i].y * root_regions / img->h));
941  region_index = (region_index < opticflow->fast9_num_regions) ? region_index : opticflow->fast9_num_regions - 1;
942  region_count[region_index][0]++;
943  }
944 
945  //sorting region_count array according to first column (number of corners).
946  qsort(region_count, opticflow->fast9_num_regions, sizeof(region_count[0]), cmp_array);
947 
948  uint16_t roi[4];
949  // Detecting corners from the region with the less to the one with the most, until a desired total is reached.
950  for (uint16_t i = 0; i < opticflow->fast9_num_regions && result->corner_cnt < 2 * opticflow->max_track_corners; i++) {
951  // Find the boundaries of the region of interest
952  roi[0] = (region_count[i][1] % root_regions) * (img->w / root_regions);
953  roi[1] = (region_count[i][1] / root_regions) * (img->h / root_regions);
954  roi[2] = roi[0] + (img->w / root_regions);
955  roi[3] = roi[1] + (img->h / root_regions);
956 
957  struct point_t *new_corners = calloc(opticflow->fast9_rsize, sizeof(struct point_t));
958  uint16_t new_count = 0;
959 
960  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
961  opticflow->fast9_padding, opticflow->fast9_padding, &new_count,
962  &opticflow->fast9_rsize, &new_corners, roi);
963 
964  // check that no identified points already exist in list
965  for (uint16_t j = 0; j < new_count; j++) {
966  bool exists = false;
967  for (uint16_t k = 0; k < result->corner_cnt; k++) {
968  if (abs((int16_t)new_corners[j].x - (int16_t)opticflow->fast9_ret_corners[k].x) < (int16_t)opticflow->fast9_min_distance
969  && abs((int16_t)new_corners[j].y - (int16_t)opticflow->fast9_ret_corners[k].y) < (int16_t)
970  opticflow->fast9_min_distance) {
971  exists = true;
972  break;
973  }
974  }
975  if (!exists) {
976  opticflow->fast9_ret_corners[result->corner_cnt].x = new_corners[j].x;
977  opticflow->fast9_ret_corners[result->corner_cnt].y = new_corners[j].y;
978  opticflow->fast9_ret_corners[result->corner_cnt].count = 0;
979  opticflow->fast9_ret_corners[result->corner_cnt].x_sub = 0;
980  opticflow->fast9_ret_corners[result->corner_cnt].y_sub = 0;
981  result->corner_cnt++;
982 
983  if (result->corner_cnt >= opticflow->fast9_rsize) {
984  break;
985  }
986  }
987  }
988 
989  free(new_corners);
990  }
991  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
992  free(region_count[i]);
993  }
994  free(region_count);
995  }
996 }
997 
                       struct opticflow_result_t *result)
{
  // Static state: ring buffer of edge histograms over the past MAX_HORIZON
  // frames, enabling an adaptive time horizon for sub-pixel flow; the frame
  // cursor and per-axis horizon offsets persist across calls.
  static struct edge_hist_t edge_hist[MAX_HORIZON];
  static uint8_t current_frame_nr = 0;
  struct edge_flow_t edgeflow;
  static uint8_t previous_frame_offset[2] = {1, 1};

  // Per-call scratch buffers for the pixel-wise displacement of the histograms.
  struct edgeflow_displacement_t displacement;
  displacement.x = calloc(img->w, sizeof(int32_t));
  displacement.y = calloc(img->h, sizeof(int32_t));
  // If the method just switched to this one, (re)initialize the edge_hist
  // ring buffer. The edge_hist[0].x == NULL guard means this allocation
  // happens only once; the buffers are intentionally never freed.
  if (opticflow->just_switched_method == 1 && edge_hist[0].x == NULL) {
    int i;
    for (i = 0; i < MAX_HORIZON; i++) {
      edge_hist[i].x = calloc(img->w, sizeof(int32_t));
      edge_hist[i].y = calloc(img->h, sizeof(int32_t));
      FLOAT_EULERS_ZERO(edge_hist[i].eulers);
    }
  }
  // Clamp the block-matching search distance to the library maximum.
  uint16_t disp_range;
  if (opticflow->search_distance < DISP_RANGE_MAX) {
    disp_range = opticflow->search_distance;
  } else {
    disp_range = DISP_RANGE_MAX;
  }

  // Clamp the block-matching window size to the library maximum.
  uint16_t window_size;

  if (opticflow->window_size < MAX_WINDOW_SIZE) {
    window_size = opticflow->window_size;
  } else {
    window_size = MAX_WINDOW_SIZE;
  }

  // Fixed-point resolution factor used by line_fit for divergence/flow.
  uint16_t RES = opticflow->resolution_factor;

  //......................Calculating EdgeFlow..................... //

  // Calculate current frame's edge histogram (one per image axis).
  int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
  int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
  calculate_edge_histogram(img, edge_hist_x, 'x', 0);
  calculate_edge_histogram(img, edge_hist_y, 'y', 0);


  // Copy frame time and angles of image to calculated edge histogram
  edge_hist[current_frame_nr].frame_time = img->ts;
  edge_hist[current_frame_nr].eulers = img->eulers;

  // Calculate which previous edge_hist to compare with the current
  // (adaptive horizon: [0] is the x axis, [1] the y axis).
  uint8_t previous_frame_nr[2];
  calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);

  // Select edge histogram from the previous frame nr
  int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
  int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;

  // Calculate the corresponding derotation (histogram shift, in pixels)
  // between the two frames from the attitude change.
  int16_t der_shift_x = 0;
  int16_t der_shift_y = 0;
  if (opticflow->derotation) {
    // NOTE(review): the continuation of these two statements (multiplication by
    // the camera focal length and the derotation correction factor) is missing
    // from this listing — restore from the upstream source before compiling.
    der_shift_x = (int16_t)((edge_hist[current_frame_nr].eulers.phi - edge_hist[previous_frame_nr[0]].eulers.phi) *
    der_shift_y = (int16_t)((edge_hist[current_frame_nr].eulers.theta - edge_hist[previous_frame_nr[1]].eulers.theta) *
  }

  // Estimate pixel wise displacement of the edge histograms for x and y direction
  calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
                              displacement.x, img->w,
                              window_size, disp_range, der_shift_x);
  calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
                              displacement.y, img->h,
                              window_size, disp_range, der_shift_y);

  // Fit a line on the pixel displacement to estimate
  // the global pixel flow and divergence (RES is resolution)
  line_fit(displacement.x, &edgeflow.div_x,
           &edgeflow.flow_x, img->w,
           window_size + disp_range, RES);
  line_fit(displacement.y, &edgeflow.div_y,
           &edgeflow.flow_y, img->h,
           window_size + disp_range, RES);

  /* Save Resulting flow in results
   * Warning: The flow detected here is different in sign
   * and size, therefore this will be divided with
   * the same subpixel factor and multiplied by -1 to make it
   * on par with the LK algorithm in opticalflow_calculator.c
   * */
  edgeflow.flow_x = -1 * edgeflow.flow_x;
  edgeflow.flow_y = -1 * edgeflow.flow_y;

  // Normalize by the adaptive horizon (number of frames looked back per axis).
  edgeflow.flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
  edgeflow.flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];

  result->flow_x = (int16_t)edgeflow.flow_x / RES;
  result->flow_y = (int16_t)edgeflow.flow_y / RES;

  // Fill up the results optic flow to be on par with LK_fast9.
  // Note: EdgeFlow has no derotated-flow distinction, so der_x/y mirror flow_x/y,
  // and the histogram peak count stands in for the corner/tracked counts.
  result->flow_der_x = result->flow_x;
  result->flow_der_y = result->flow_y;
  result->corner_cnt = getAmountPeaks(edge_hist_x, 500, img->w);
  result->tracked_cnt = getAmountPeaks(edge_hist_x, 500, img->w);
  result->divergence = -1.0 * (float)edgeflow.div_x /
                       RES; // Also multiply the divergence with -1.0 to make it on par with the LK algorithm of
  result->div_size = result->divergence;  // Fill the div_size with the divergence to atleast get some divergenge measurement when switching from LK to EF
  result->camera_id = opticflow->id;
  result->surface_roughness = 0.0f;
  //......................Calculating VELOCITY ..................... //

  /* Estimate fps per direction.
   * This is the fps with adaptive horizon for subpixel flow, which is not similar
   * to the loop speed of the algorithm. The faster the quadcopter flies
   * the higher it becomes.
   */
  float fps_x = 0;
  float fps_y = 0;
  float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
  float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
  fps_x = 1 / (time_diff_x);
  fps_y = 1 / (time_diff_y);

  result->fps = fps_x;

  // TODO scale flow to rad/s here

  // Calculate velocity: flow [subpixels/frame] * fps * height-above-ground,
  // converted to m/s via the focal length; z velocity comes from divergence.
  result->vel_cam.x = edgeflow.flow_x * fps_x * agl_dist_value_filtered * opticflow->camera->camera_intrinsics.focal_x /
                      RES;
  result->vel_cam.y = edgeflow.flow_y * fps_y * agl_dist_value_filtered * opticflow->camera->camera_intrinsics.focal_y /
                      RES;
  result->vel_cam.z = result->divergence * fps_x * agl_dist_value_filtered;

  // Apply a median filter to the velocity if wanted
  if (opticflow->median_filter == true) {
    // NOTE(review): the median-filter update call is omitted in this listing —
    // restore from the upstream source.
  }

  // Fixed measurement-noise estimate handed to the state filter.
  result->noise_measurement = 0.2;
  if (opticflow->show_flow) {
    draw_edgeflow_img(img, edgeflow, prev_edge_histogram_x, edge_hist_x);
  }
  // Increment and wrap current time frame
  current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
  // Free alloc'd variables
  free(displacement.x);
  free(displacement.y);
  return true;
}
1160 
1161 
1170  struct opticflow_result_t *result)
1171 {
1172  bool flow_successful = false;
1173  // A switch counter that checks in the loop if the current method is similar,
1174  // to the previous (for reinitializing structs)
1175  static int8_t switch_counter[2] = {-1, -1};
1176  if (switch_counter[opticflow->id] != opticflow->method) {
1177  opticflow->just_switched_method = true;
1178  switch_counter[opticflow->id] = opticflow->method;
1179  // Clear the static result
1180  memset(result, 0, sizeof(struct opticflow_result_t));
1181  } else {
1182  opticflow->just_switched_method = false;
1183  }
1184 
1185  // Switch between methods (0 = fast9/lukas-kanade, 1 = EdgeFlow)
1186  if (opticflow->method == 0) {
1187  flow_successful = calc_fast9_lukas_kanade(opticflow, img, result);
1188  } else if (opticflow->method == 1) {
1189  flow_successful = calc_edgeflow_tot(opticflow, img, result);
1190  }
1191  /* Rotate velocities from camera frame coordinates to body coordinates for control
1192  * IMPORTANT!!! This frame to body orientation should be the case for the Parrot
1193  * ARdrone and Bebop, however this can be different for other quadcopters
1194  * ALWAYS double check!
1195  */
1196  float_rmat_transp_vmult(&result->vel_body, &body_to_cam[opticflow->id], &result->vel_cam);
1197 
1198  return flow_successful;
1199 }
1200 
/**
 * Calculate the elapsed time from starttime to finishtime in milliseconds.
 * The microsecond remainder is truncated toward zero, matching integer division.
 */
static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
{
  return (uint32_t)((finishtime->tv_sec - starttime->tv_sec) * 1000
                    + (finishtime->tv_usec - starttime->tv_usec) / 1000);
}
1214 
1222 static int cmp_flow(const void *a, const void *b)
1223 {
1224  const struct flow_t *a_p = (const struct flow_t *)a;
1225  const struct flow_t *b_p = (const struct flow_t *)b;
1226  return (a_p->flow_x * a_p->flow_x + a_p->flow_y * a_p->flow_y) - (b_p->flow_x * b_p->flow_x + b_p->flow_y *
1227  b_p->flow_y);
1228 }
1229 
/**
 * Compare the rows of an integer (uint16_t) 2D array based on the first column.
 * Used for sorting the per-region corner counts.
 *
 * qsort() is invoked on an array of uint16_t* row pointers, so a and b point
 * to the row POINTERS, not to the row data — the comparator must dereference
 * once before reading the count (casting a directly to const uint16_t* would
 * compare raw pointer bytes instead of corner counts).
 */
static int cmp_array(const void *a, const void *b)
{
  const uint16_t *pa = *(const uint16_t *const *)a;
  const uint16_t *pb = *(const uint16_t *const *)b;
  // Both counts are uint16_t, so the int difference cannot overflow.
  return (int)pa[0] - (int)pb[0];
}
bool track_back
Whether to track flow vectors back to the previous image, in order to check if the back-tracked flow ...
uint16_t n_agents[2]
int actfast_gradient_method
Whether to use a simple or Sobel filter.
#define InitMedianFilterVect3Float(_f, _n)
int16_t flow_y
Flow in y direction from the camera (in subpixels) with Y positive to the bottom. ...
uint16_t fast9_min_distance
Minimum distance in pixels between corners.
void calculate_edge_displacement(int32_t *edge_histogram, int32_t *edge_histogram_prev, int32_t *displacement, uint16_t size, uint8_t window, uint8_t disp_range, int32_t der_shift)
Calculate_displacement calculates the displacement between two histograms.
Definition: edge_flow.c:168
int16_t flow_der_y
The derotated flow calculation in the y direction (in subpixels)
#define FAST9_HIGH_THRESHOLD
bool calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct image_t *img, struct opticflow_result_t *result)
Run the optical flow with fast9 and lukaskanade on a new image frame.
int32_t flow_x
Definition: edge_flow.h:77
float div_size
Divergence as determined with the size_divergence script.
bool feature_management
Decides whether to keep track corners in memory for the next frame instead of re-detecting every time...
uint8_t max_iterations
The maximum amount of iterations the Lucas Kanade algorithm should do.
float focal_x
focal length in the x-direction in pixels
Definition: video_device.h:47
#define MAX_WINDOW_SIZE
Definition: edge_flow.h:55
float phi
in radians
#define float_rmat_of_eulers
bool opticflow_calc_frame(struct opticflow_t *opticflow, struct image_t *img, struct opticflow_result_t *result)
Run the optical flow on a new image frame.
#define OPTICFLOW_FAST9_MIN_DISTANCE_CAMERA2
#define OPTICFLOW_TRACK_BACK
struct MedianFilter3Float vel_filt
static float K[9]
#define OPTICFLOW_ACTFAST_MIN_GRADIENT
uint16_t window_size
Window size for the blockmatching algorithm (general value for all methods)
#define OPTICFLOW_ACTFAST_SHORT_STEP
uint16_t n_time_steps[2]
calculate optical flow with EdgeFlow
void image_switch(struct image_t *a, struct image_t *b)
This will switch image *a and *b This is faster as image_copy because it doesn't copy the whole image...
Definition: image.c:112
#define OPTICFLOW_SHOW_FLOW
#define OPTICFLOW_SUBPIXEL_FACTOR_CAMERA2
#define FLOAT_EULERS_ZERO(_e)
uint16_t fast9_rsize
Amount of corners allocated.
#define OPTICFLOW_PYRAMID_LEVEL
Calculate velocity from optic flow.
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y_CAMERA2
bool median_filter
Decides to use a median filter on the velocity.
#define VECT3_ASSIGN(_a, _x, _y, _z)
Definition: pprz_algebra.h:125
signed char int8_t
Typedef defining 8 bit char type.
Definition: vl53l1_types.h:103
uint32_t getAmountPeaks(int32_t *edgehist, int32_t thres, int32_t size)
getAmountPeaks, calculates the amount of peaks in a edge histogram
Definition: edge_flow.c:337
#define UpdateMedianFilterVect3Float(_f, _v)
#define LARGE_FLOW_ERROR
Definition: lucas_kanade.h:37
#define OPTICFLOW_PYRAMID_LEVEL_CAMERA2
int32_t flow_y
The y direction flow in subpixels.
Definition: image.h:81
#define OPTICFLOW_MEDIAN_FILTER
void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type)
Create a new image.
Definition: image.c:43
Definition: image.h:44
#define OPTICFLOW_BODY_TO_CAM_PHI
int32_t div_x
Definition: edge_flow.h:78
short int16_t
Typedef defining 16 bit short type.
Definition: vl53l1_types.h:93
uint16_t max_track_corners
Maximum amount of corners Lucas Kanade should track.
#define OPTICFLOW_FAST9_NUM_REGIONS_CAMERA2
void draw_edgeflow_img(struct image_t *img, struct edge_flow_t edgeflow, int32_t *edge_hist_x_prev, int32_t *edge_hist_x)
Draws edgehistogram, displacement and linefit directly on the image for debugging (only for edgeflow ...
Definition: edge_flow.c:297
Definition: image.h:78
float psi
in radians
int32_t * y
Definition: edge_flow.h:66
int32_t flow_y
Definition: edge_flow.h:79
void image_show_flow_color(struct image_t *img, struct flow_t *vectors, uint16_t points_cnt, uint8_t subpixel_factor, const uint8_t *color, const uint8_t *bad_color)
Shows the flow from a specific point to a new point This works on YUV422 and Grayscale images...
Definition: image.c:732
#define OPTICFLOW_THRESHOLD_VEC
#define OPTICFLOW_FAST9_MIN_DISTANCE
void image_copy(struct image_t *input, struct image_t *output)
Copy an image from inut to output This will only work if the formats are the same.
Definition: image.c:89
#define OPTICFLOW_BODY_TO_CAM_THETA_CAMERA2
#define OPTICFLOW_FAST9_ADAPTIVE
bool calc_edgeflow_tot(struct opticflow_t *opticflow, struct image_t *img, struct opticflow_result_t *result)
Run the optical flow with EDGEFLOW on a new image frame.
void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt)
Show points in an image by coloring them through giving the pixels the maximum value.
Definition: image.c:668
struct opticflow_t opticflow[ACTIVE_CAMERAS]
Opticflow calculations.
static int cmp_flow(const void *a, const void *b)
Compare two flow vectors based on flow distance Used for sorting.
#define RES
Definition: detect_window.c:27
char * dev_name
path to device
Definition: video_device.h:59
#define OPTICFLOW_MAX_ITERATIONS_CAMERA2
Calculate divergence from flow vectors by looking at line sizes beteween the points.
struct FloatVect3 vel_body
The velocity in body frame (m/s) with X positive to the front of the aircraft, Y positive to the righ...
uint32_t x
The x coordinate of the point.
Definition: image.h:59
#define OPTICFLOW_ACTFAST_LONG_STEP
uint16_t resolution_factor
The resolution in EdgeFlow to determine the Divergence.
static struct flow_t * predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff, float theta_diff, float psi_diff, struct opticflow_t *opticflow)
uint8_t threshold_vec
The threshold in x, y subpixels which the algorithm should stop.
void calculate_edge_histogram(struct image_t *img, int32_t edge_histogram[], char direction, uint16_t edge_threshold)
Calculate a edge/gradient histogram for each dimension of the image.
Definition: edge_flow.c:88
bool derotation
Derotation switched on or off (depended on the quality of the gyroscope measurement) ...
Finds corners in an image by actively scanning the image.
uint16_t y_sub
The y subpixel coordinate of the point.
Definition: image.h:63
uint8_t method
Method to use to calculate the optical flow.
#define B
#define OPTICFLOW_BODY_TO_CAM_PHI_CAMERA2
euler angles
int n_samples
Definition: detect_gate.c:85
#define bottom_camera
Definition: mt9v117_nps.c:3
uint8_t fast9_num_regions
The number of regions of interest the image is split into.
#define OPTICFLOW_MAX_TRACK_CORNERS_CAMERA2
#define OPTICFLOW_FAST9_PADDING_CAMERA2
float theta
in radians
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y
float get_size_divergence(struct flow_t *vectors, int count, int n_samples)
Get divergence from optical flow vectors based on line sizes between corners.
#define OPTICFLOW_ACTFAST_GRADIENT_METHOD_CAMERA2
Image helper functions like resizing, color filter, converters...
void act_fast(struct image_t *img, uint8_t fast_threshold, uint16_t *num_corners, struct point_t **ret_corners, uint16_t n_agents, uint16_t n_time_steps, float long_step, float short_step, int min_gradient, int gradient_method, int camera_id)
Do an ACT-FAST corner detection.
Definition: act_fast.c:54
uint8_t camera_id
Camera id as passed to cv_add_to_device.
#define MAX_HORIZON
Definition: edge_flow.h:45
uint16_t tracked_cnt
The amount of tracked corners.
void float_rmat_transp_vmult(struct FloatVect3 *vb, struct FloatRMat *m_b2a, struct FloatVect3 *va)
rotate 3D vector by transposed rotation matrix.
#define OPTICFLOW_ACTFAST_MIN_GRADIENT_CAMERA2
#define OPTICFLOW_RESOLUTION_FACTOR
#define OPTICFLOW_FEATURE_MANAGEMENT
bool normalized_coords_to_distorted_pixels(float x_n, float y_n, float *x_pd, float *y_pd, float k, const float *K)
Transform normalized coordinates to distorted pixel coordinates.
Definition: undistortion.c:146
uint16_t subpixel_factor
The amount of subpixels per pixel.
#define OPTICFLOW_FAST9_PADDING
bool analyze_linear_flow_field(struct flow_t *vectors, int count, float error_threshold, int n_iterations, int n_samples, int im_width, int im_height, struct linear_flow_fit_info *info)
Analyze a linear flow field, retrieving information such as divergence, surface roughness, focus of expansion, etc.
#define OPTICFLOW_CORNER_METHOD
struct point_t * fast9_ret_corners
Corners.
struct FloatRMat body_to_cam[2]
uint16_t w
Image width.
Definition: image.h:46
#define OPTICFLOW_FAST9_THRESHOLD
uint16_t h
Image height.
Definition: image.h:47
struct image_t prev_img_gray
Previous gray image frame.
struct FloatVect3 vel_cam
The velocity in camera frame (m/s)
#define OPTICFLOW_METHOD
int int32_t
Typedef defining 32 bit int type.
Definition: vl53l1_types.h:83
static uint16_t c1
Definition: baro_MS5534A.c:203
#define OPTICFLOW_BODY_TO_CAM_PSI
uint8_t fast9_threshold
FAST9 corner detection threshold.
bool just_switched_method
Boolean to check if methods has been switched (for reinitialization)
#define OPTICFLOW_FAST9_REGION_DETECT
uint16_t f
Camera baseline, in meters (i.e. horizontal distance between the two cameras of the stereo setup) ...
Definition: wedgebug.c:204
#define OPTICFLOW_BODY_TO_CAM_PSI_CAMERA2
#define OPTICFLOW_TRACK_BACK_CAMERA2
float b
Definition: wedgebug.c:202
Bind to agl ABI message and provide a filtered value to be used in flight plans.
struct point_t pos
The original position the flow comes from in subpixels.
Definition: image.h:79
float Dhane_k
(un)distortion parameter for a fish-eye lens
Definition: video_device.h:51
void opticflow_calc_init(struct opticflow_t opticflow[])
Initialize the opticflow calculator.
uint32_t y
The y coordinate of the point.
Definition: image.h:60
#define OPTICFLOW_WINDOW_SIZE
void image_to_grayscale(struct image_t *input, struct image_t *output)
Convert an image to grayscale.
Definition: image.c:131
Definition: image.h:58
#define EXHAUSTIVE_FAST
uint16_t x_sub
The x subpixel coordinate of the point.
Definition: image.h:62
#define OPTICFLOW_MEDIAN_FILTER_CAMERA2
Functions for undistorting camera images.
struct image_t img_gray
Current gray image frame.
static int cmp_array(const void *a, const void *b)
Compare the rows of an integer (uint16_t) 2D array based on the first column.
uint32_t error
The matching error in the tracking process in subpixels.
Definition: image.h:82
#define OPTICFLOW_WINDOW_SIZE_CAMERA2
#define OPTICFLOW_FAST9_NUM_REGIONS
#define SIZE_DIV
uint16_t corner_cnt
The amount of coners found by FAST9.
#define OPTICFLOW_ACTFAST_SHORT_STEP_CAMERA2
#define OPTICFLOW_ACTFAST_LONG_STEP_CAMERA2
float fps
Frames per second of the optical flow calculation.
efficient fixed-point optical-flow calculation
#define OPTICFLOW_DEROTATION
bool show_flow
Whether to draw the flow vectors on the image. Watch out! This changes the image as will be received ...
struct timeval ts
The timestamp of creation.
Definition: image.h:48
bool got_first_img
If we got a image to work with.
float divergence
Basically, relative_velocity_z. Actual divergence of a 2D flow field is 2 * relative_velocity_z.
struct FloatEulers eulers
Definition: edge_flow.h:68
#define OPTICFLOW_SEARCH_DISTANCE
const struct video_config_t * camera
float focal_y
focal length in the y-direction in pixels
Definition: video_device.h:48
bool fast9_region_detect
Decides whether to detect fast9 corners in specific regions of interest or the whole image (only for ...
#define LINEAR_FIT
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X
#define OPTICFLOW_METHOD_CAMERA2
#define MEDIAN_DEFAULT_SIZE
Definition: median_filter.h:28
int32_t div_y
Definition: edge_flow.h:80
uint16_t count
Number of times the point has been tracked successfully.
Definition: image.h:61
float center_y
center image coordinate in the y-direction
Definition: video_device.h:50
struct camera_intrinsics_t camera_intrinsics
Intrinsics of the camera; camera calibration parameters and distortion parameter(s) ...
Definition: video_device.h:68
uint16_t fast9_padding
Padding used in FAST9 detector.
float surface_roughness
The error of the linear fit is a measure of surface roughness.
bool fast9_adaptive
Whether the FAST9 threshold should be adaptive.
#define OPTICFLOW_FAST9_THRESHOLD_CAMERA2
rotation matrix
bool distorted_pixels_to_normalized_coords(float x_pd, float y_pd, float *x_n, float *y_n, float k, const float *K)
Transform distorted pixel coordinates to normalized coordinates.
Definition: undistortion.c:128
#define OPTICFLOW_ACTFAST_GRADIENT_METHOD
#define OPTICFLOW_BODY_TO_CAM_THETA
#define ACT_FAST
float derotation_correction_factor_x
Correction factor for derotation in x axis, determined from a fit from the gyros and flow rotation...
#define A
#define OPTICFLOW_DEROTATION_CAMERA2
#define OPTICFLOW_FEATURE_MANAGEMENT_CAMERA2
#define FAST9_MAX_CORNERS
static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow, struct opticflow_result_t *result)
Grayscale image with only the Y part (uint8 per pixel)
Definition: image.h:37
static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
Calculate the difference from start till finish.
void line_fit(int32_t *displacement, int32_t *divergence, int32_t *flow, uint32_t size, uint32_t border, uint16_t RES)
Fits a linear model to an array with pixel displacements with least squares.
Definition: edge_flow.c:247
#define OPTICFLOW_MAX_TRACK_CORNERS
#define OPTICFLOW_SEARCH_DISTANCE_CAMERA2
uint8_t corner_method
Method to use for determining where the corners are.
void calc_previous_frame_nr(struct opticflow_result_t *result, struct opticflow_t *opticflow, uint8_t current_frame_nr, uint8_t *previous_frame_offset, uint8_t *previous_frame_nr)
Calc_previous_frame_nr; adaptive Time Horizon.
Definition: edge_flow.c:39
#define OPTICFLOW_SUBPIXEL_FACTOR
int16_t flow_x
Flow in x direction from the camera (in subpixels) with X positive to the right.
#define OPTICFLOW_MAX_ITERATIONS
#define OPTICFLOW_CORNER_METHOD_CAMERA2
#define DISP_RANGE_MAX
Definition: edge_flow.h:52
#define front_camera
Definition: mt9f002_nps.c:3
int32_t flow_x
The x direction flow in subpixels.
Definition: image.h:80
uint8_t pyramid_level
Number of pyramid levels used in Lucas Kanade algorithm (0 == no pyramids used)
struct FloatEulers eulers
Euler Angles at time of image.
Definition: image.h:49
float noise_measurement
noise of measurement, for state filter
#define OPTICFLOW_RESOLUTION_FACTOR_CAMERA2
float actfast_short_step
Step size to take when there is an edge to be followed.
#define FAST9_LOW_THRESHOLD
void fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint16_t x_padding, uint16_t y_padding, uint16_t *num_corners, uint16_t *ret_corners_length, struct point_t **ret_corners, uint16_t *roi)
Do a FAST9 corner detection.
Definition: fast_rosten.c:52
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X_CAMERA2
float actfast_long_step
Step size to take when there is no texture.
#define OPTICFLOW_SHOW_FLOW_CAMERA2
uint16_t search_distance
Search distance for blockmatching alg.
float divergence
Divergence as determined with a linear flow fit.
float derotation_correction_factor_y
Correction factor for derotation in Y axis, determined from a fit from the gyros and flow rotation...
unsigned short uint16_t
Typedef defining 16 bit unsigned short type.
Definition: vl53l1_types.h:88
float surface_roughness
Surface roughness as determined with a linear optical flow fit.
struct timeval frame_time
Definition: edge_flow.h:67
struct flow_t * opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt, uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint8_t max_points, uint8_t pyramid_level, uint8_t keep_bad_points)
Definition: lucas_kanade.c:74
int32_t * x
Definition: edge_flow.h:65
float agl_dist_value_filtered
Definition: agl_dist.c:35
#define OPTICFLOW_THRESHOLD_VEC_CAMERA2
unsigned char uint8_t
Typedef defining 8 bit unsigned char type.
Definition: vl53l1_types.h:98
#define OPTICFLOW_FAST9_REGION_DETECT_CAMERA2
unsigned int uint32_t
Typedef defining 32 bit unsigned int type.
Definition: vl53l1_types.h:78
int16_t flow_der_x
The derotated flow calculation in the x direction (in subpixels)
float center_x
center image coordinate in the x-direction
Definition: video_device.h:49
#define OPTICFLOW_FAST9_ADAPTIVE_CAMERA2
int actfast_min_gradient
Threshold that decides when there is sufficient texture for edge following.