Paparazzi UAS  v5.14.0_stable-0-g3f680d1
Paparazzi is a free software Unmanned Aircraft System.
opticflow_calculator.c
1 /*
2  * Copyright (C) 2014 Hann Woei Ho
3  * 2015 Freek van Tienen <freek.v.tienen@gmail.com>
4  * 2016 Kimberly McGuire <k.n.mcguire@tudelft.nl>
5  *
6  * This file is part of Paparazzi.
7  *
8  * Paparazzi is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * Paparazzi is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with Paparazzi; see the file COPYING. If not, see
20  * <http://www.gnu.org/licenses/>.
21  */
22 
29 #include "std.h"
30 
31 #include <stdio.h>
32 #include <string.h>
33 #include <stdlib.h>
34 
35 // Own Header
36 #include "opticflow_calculator.h"
37 
38 // Computer Vision
39 #include "lib/vision/image.h"
40 #include "lib/vision/lucas_kanade.h"
41 #include "lib/vision/fast_rosten.h"
42 #include "lib/vision/act_fast.h"
43 #include "lib/vision/edge_flow.h"
44 #include "lib/vision/undistortion.h"
45 #include "size_divergence.h"
46 #include "linear_flow_fit.h"
47 #include "modules/sonar/agl_dist.h"
48 
49 // to get the definition of front_camera / bottom_camera
50 #include BOARD_CONFIG
51 
52 // whether to show the flow and corners:
53 #define OPTICFLOW_SHOW_CORNERS 0
54 
55 #define EXHAUSTIVE_FAST 0
56 #define ACT_FAST 1
57 // TODO: these are now adapted, but perhaps later could be a setting:
60 
61 // What methods are run to determine divergence, lateral flow, etc.
62 // SIZE_DIV looks at line sizes and only calculates divergence
63 #define SIZE_DIV 1
64 // LINEAR_FIT makes a linear optical flow field fit and extracts a lot of information:
65 // relative velocities in x, y, z (divergence / time to contact), the slope of the surface, and the surface roughness.
66 #define LINEAR_FIT 1
67 
68 #ifndef OPTICFLOW_CORNER_METHOD
69 #define OPTICFLOW_CORNER_METHOD ACT_FAST
70 #endif
71 PRINT_CONFIG_VAR(OPTICFLOW_CORNER_METHOD)
72 
73 /* Set the default values */
74 #ifndef OPTICFLOW_MAX_TRACK_CORNERS
75 #define OPTICFLOW_MAX_TRACK_CORNERS 25
76 #endif
77 PRINT_CONFIG_VAR(OPTICFLOW_MAX_TRACK_CORNERS)
78 
79 #ifndef OPTICFLOW_WINDOW_SIZE
80 #define OPTICFLOW_WINDOW_SIZE 10
81 #endif
82 PRINT_CONFIG_VAR(OPTICFLOW_WINDOW_SIZE)
83 
84 #ifndef OPTICFLOW_SEARCH_DISTANCE
85 #define OPTICFLOW_SEARCH_DISTANCE 20
86 #endif
87 PRINT_CONFIG_VAR(OPTICFLOW_SEARCH_DISTANCE)
88 
89 #ifndef OPTICFLOW_SUBPIXEL_FACTOR
90 #define OPTICFLOW_SUBPIXEL_FACTOR 10
91 #endif
92 PRINT_CONFIG_VAR(OPTICFLOW_SUBPIXEL_FACTOR)
93 
94 #ifndef OPTICFLOW_RESOLUTION_FACTOR
95 #define OPTICFLOW_RESOLUTION_FACTOR 100
96 #endif
97 PRINT_CONFIG_VAR(OPTICFLOW_RESOLUTION_FACTOR)
98 
99 #ifndef OPTICFLOW_MAX_ITERATIONS
100 #define OPTICFLOW_MAX_ITERATIONS 10
101 #endif
102 PRINT_CONFIG_VAR(OPTICFLOW_MAX_ITERATIONS)
103 
104 #ifndef OPTICFLOW_THRESHOLD_VEC
105 #define OPTICFLOW_THRESHOLD_VEC 2
106 #endif
107 PRINT_CONFIG_VAR(OPTICFLOW_THRESHOLD_VEC)
108 
109 #ifndef OPTICFLOW_PYRAMID_LEVEL
110 #define OPTICFLOW_PYRAMID_LEVEL 2
111 #endif
112 PRINT_CONFIG_VAR(OPTICFLOW_PYRAMID_LEVEL)
113 
114 #ifndef OPTICFLOW_FAST9_ADAPTIVE
115 #define OPTICFLOW_FAST9_ADAPTIVE TRUE
116 #endif
117 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_ADAPTIVE)
118 
119 #ifndef OPTICFLOW_FAST9_THRESHOLD
120 #define OPTICFLOW_FAST9_THRESHOLD 20
121 #endif
122 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_THRESHOLD)
123 
124 #ifndef OPTICFLOW_FAST9_MIN_DISTANCE
125 #define OPTICFLOW_FAST9_MIN_DISTANCE 10
126 #endif
127 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_MIN_DISTANCE)
128 
129 #ifndef OPTICFLOW_FAST9_PADDING
130 #define OPTICFLOW_FAST9_PADDING 20
131 #endif
132 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_PADDING)
133 
134 // FAST9 thresholds that are currently not set from the GCS:
135 #define FAST9_LOW_THRESHOLD 5
136 #define FAST9_HIGH_THRESHOLD 60
137 
138 #ifndef OPTICFLOW_METHOD
139 #define OPTICFLOW_METHOD 0
140 #endif
141 PRINT_CONFIG_VAR(OPTICFLOW_METHOD)
142 
143 #if OPTICFLOW_METHOD > 1
144 #error WARNING: Both Lukas Kanade and EdgeFlow are NOT selected
145 #endif
146 
147 #ifndef OPTICFLOW_DEROTATION
148 #define OPTICFLOW_DEROTATION TRUE
149 #endif
150 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION)
151 
152 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X
153 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X 1.0
154 #endif
155 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X)
156 
157 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y
158 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y 1.0
159 #endif
160 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y)
161 
162 #ifndef OPTICFLOW_MEDIAN_FILTER
163 #define OPTICFLOW_MEDIAN_FILTER FALSE
164 #endif
165 PRINT_CONFIG_VAR(OPTICFLOW_MEDIAN_FILTER)
166 
167 #ifndef OPTICFLOW_FEATURE_MANAGEMENT
168 #define OPTICFLOW_FEATURE_MANAGEMENT 0
169 #endif
170 PRINT_CONFIG_VAR(OPTICFLOW_FEATURE_MANAGEMENT)
171 
172 #ifndef OPTICFLOW_FAST9_REGION_DETECT
173 #define OPTICFLOW_FAST9_REGION_DETECT 1
174 #endif
175 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_REGION_DETECT)
176 
177 #ifndef OPTICFLOW_FAST9_NUM_REGIONS
178 #define OPTICFLOW_FAST9_NUM_REGIONS 9
179 #endif
180 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_NUM_REGIONS)
181 
182 #ifndef OPTICFLOW_ACTFAST_LONG_STEP
183 #define OPTICFLOW_ACTFAST_LONG_STEP 10
184 #endif
185 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_LONG_STEP)
186 
187 #ifndef OPTICFLOW_ACTFAST_SHORT_STEP
188 #define OPTICFLOW_ACTFAST_SHORT_STEP 2
189 #endif
190 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_SHORT_STEP)
191 
192 #ifndef OPTICFLOW_ACTFAST_GRADIENT_METHOD
193 #define OPTICFLOW_ACTFAST_GRADIENT_METHOD 1
194 #endif
195 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_GRADIENT_METHOD)
196 
197 #ifndef OPTICFLOW_ACTFAST_MIN_GRADIENT
198 #define OPTICFLOW_ACTFAST_MIN_GRADIENT 10
199 #endif
200 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_MIN_GRADIENT)
201 
202 // Defaults for ARdrone
203 #ifndef OPTICFLOW_BODY_TO_CAM_PHI
204 #define OPTICFLOW_BODY_TO_CAM_PHI 0
205 #endif
206 #ifndef OPTICFLOW_BODY_TO_CAM_THETA
207 #define OPTICFLOW_BODY_TO_CAM_THETA 0
208 #endif
209 #ifndef OPTICFLOW_BODY_TO_CAM_PSI
210 #define OPTICFLOW_BODY_TO_CAM_PSI -M_PI_2
211 #endif
212 
213 // Tracking back flow to make the accepted flow vectors more robust:
214 // Default is false, as it does take extra processing time
215 #ifndef OPTICFLOW_TRACK_BACK
216 #define OPTICFLOW_TRACK_BACK FALSE
217 #endif
218 PRINT_CONFIG_VAR(OPTICFLOW_TRACK_BACK)
219 
220 // Whether to draw the flow on the image:
221 // False by default, since it changes the image and costs time.
222 #ifndef OPTICFLOW_SHOW_FLOW
223 #define OPTICFLOW_SHOW_FLOW FALSE
224 #endif
225 PRINT_CONFIG_VAR(OPTICFLOW_SHOW_FLOW)
226 
227 
228 
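The defaults above all follow one compile-time pattern: a value is only defined here if the airframe file or the build flags have not defined it already, and PRINT_CONFIG_VAR then echoes whichever value ended up being used. A minimal standalone sketch of that override pattern, with DEMO_FAST9_THRESHOLD as a made-up placeholder name rather than a Paparazzi symbol:

#include <stdio.h>

/* Fallback, used only when the build does not supply a value,
 * e.g. via -DDEMO_FAST9_THRESHOLD=35 on the compiler command line. */
#ifndef DEMO_FAST9_THRESHOLD
#define DEMO_FAST9_THRESHOLD 20
#endif

int main(void)
{
  printf("FAST9 threshold in use: %d\n", DEMO_FAST9_THRESHOLD);
  return 0;
}

Compiling with -DDEMO_FAST9_THRESHOLD=35 changes the value without touching the source, which is how the module parameters above are tuned per airframe.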
229 //Include median filter
230 #include "filters/median_filter.h"
231 struct MedianFilter3Float vel_filt;
232 struct FloatRMat body_to_cam;
233 
234 /* Functions only used here */
235 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime);
236 static int cmp_flow(const void *a, const void *b);
237 static int cmp_array(const void *a, const void *b);
238 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow,
239  struct opticflow_result_t *result);
240 
241 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
242  float theta_diff, float psi_diff, struct opticflow_t *opticflow);
247 void opticflow_calc_init(struct opticflow_t *opticflow)
248 {
249  /* Set the default values */
250  opticflow->method = OPTICFLOW_METHOD; //0 = LK_fast9, 1 = Edgeflow
251  opticflow->window_size = OPTICFLOW_WINDOW_SIZE;
253  opticflow->derotation = OPTICFLOW_DEROTATION; //0 = OFF, 1 = ON
256  opticflow->track_back = OPTICFLOW_TRACK_BACK;
257  opticflow->show_flow = OPTICFLOW_SHOW_FLOW;
260  if (opticflow->subpixel_factor == 0) {
261  opticflow->subpixel_factor = 10;
262  }
271 
276  opticflow->fast9_rsize = 512;
277  opticflow->fast9_ret_corners = calloc(opticflow->fast9_rsize, sizeof(struct point_t));
278 
284 
287 
288 }
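opticflow_calc_init ends by pre-allocating fast9_rsize (512) corner slots that the detectors can fill and, if needed, enlarge later. A standalone sketch of that allocate-then-grow idea, using a made-up demo_point type instead of Paparazzi's struct point_t:

#include <stdint.h>
#include <stdlib.h>

struct demo_point { uint32_t x, y; };

int main(void)
{
  uint16_t capacity = 512;  /* plays the role of fast9_rsize */
  struct demo_point *corners = calloc(capacity, sizeof(struct demo_point));
  if (corners == NULL) { return 1; }

  /* If a detection pass needs more slots, the buffer can be enlarged. */
  uint16_t needed = 700;
  if (needed > capacity) {
    struct demo_point *tmp = realloc(corners, needed * sizeof(struct demo_point));
    if (tmp == NULL) { free(corners); return 1; }
    corners = tmp;
    capacity = needed;
  }

  free(corners);
  return 0;
}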
297 bool calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct image_t *img,
298  struct opticflow_result_t *result)
299 {
300  if (opticflow->just_switched_method) {
301  // Create the image buffers
302  image_create(&opticflow->img_gray, img->w, img->h, IMAGE_GRAYSCALE);
303  image_create(&opticflow->prev_img_gray, img->w, img->h, IMAGE_GRAYSCALE);
304 
305  // Set the previous values
306  opticflow->got_first_img = false;
307 
308  // Init median filters with zeros
309  InitMedianFilterVect3Float(vel_filt, MEDIAN_DEFAULT_SIZE);
310  }
311 
312  // Convert image to grayscale
313  image_to_grayscale(img, &opticflow->img_gray);
314 
315  if (!opticflow->got_first_img) {
316  image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
317  opticflow->got_first_img = true;
318  return false;
319  }
320 
321  // variables for linear flow fit:
322  float error_threshold;
323  int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
324  struct linear_flow_fit_info fit_info;
325 
326  // Update FPS for information
327  float dt = timeval_diff(&(opticflow->prev_img_gray.ts), &(img->ts));
328  if (dt > 1e-5) {
329  result->fps = 1000.f / dt;
330  } else {
331  return false;
332  }
333 
334  // *************************************************************************************
335  // Corner detection
336  // *************************************************************************************
337 
338  // if feature_management is selected and tracked corners drop below a threshold, redetect
339  if ((opticflow->feature_management) && (result->corner_cnt < opticflow->max_track_corners / 2)) {
340  manage_flow_features(img, opticflow, result);
341  } else if (!opticflow->feature_management) {
342  // needs to be set to 0 because result is now static
343  result->corner_cnt = 0;
344 
345  if (opticflow->corner_method == EXHAUSTIVE_FAST) {
346  // FAST corner detection
347  // TODO: There is something wrong with fast9_detect destabilizing FPS. This problem is reduced by setting min_distance
348  // to 0 (see defines); however, a more permanent solution should be considered
349  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
350  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
351  &opticflow->fast9_rsize,
352  &opticflow->fast9_ret_corners,
353  NULL);
354 
355  } else if (opticflow->corner_method == ACT_FAST) {
356  // ACT-FAST corner detection:
357  act_fast(&opticflow->prev_img_gray, opticflow->fast9_threshold, &result->corner_cnt,
358  &opticflow->fast9_ret_corners, n_agents, n_time_steps,
359  opticflow->actfast_long_step, opticflow->actfast_short_step, opticflow->actfast_min_gradient,
360  opticflow->actfast_gradient_method);
361  }
362 
363  // Adaptive threshold
364  if (opticflow->fast9_adaptive) {
365 
366  // This works well for exhaustive FAST, but drives the threshold to the minimum for ACT-FAST:
367  // Decrease and increase the threshold based on previous values
368  if (result->corner_cnt < 40) { // TODO: Replace 40 with OPTICFLOW_MAX_TRACK_CORNERS / 2
369  // make detections easier:
370  if (opticflow->fast9_threshold > FAST9_LOW_THRESHOLD) {
371  opticflow->fast9_threshold--;
372  }
373 
374  if (opticflow->corner_method == ACT_FAST) {
375  n_time_steps++;
376  n_agents++;
377  }
378 
379  } else if (result->corner_cnt > OPTICFLOW_MAX_TRACK_CORNERS * 2 && opticflow->fast9_threshold < FAST9_HIGH_THRESHOLD) {
380  opticflow->fast9_threshold++;
381  if (opticflow->corner_method == ACT_FAST && n_time_steps > 5 && n_agents > 10) {
382  n_time_steps--;
383  n_agents--;
384  }
385  }
386  }
387  }
388 
389 #if OPTICFLOW_SHOW_CORNERS
390  image_show_points(img, opticflow->fast9_ret_corners, result->corner_cnt);
391 #endif
392 
393  // Check if we found some corners to track
394  if (result->corner_cnt < 1) {
395  // Clear the result otherwise the previous values will be returned for this frame too
396  VECT3_ASSIGN(result->vel_cam, 0, 0, 0);
397  VECT3_ASSIGN(result->vel_body, 0, 0, 0);
398  result->div_size = 0; result->divergence = 0;
399  result->noise_measurement = 5.0;
400 
401  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
402  return false;
403  }
404 
405  // *************************************************************************************
406  // Corner Tracking
407  // *************************************************************************************
408 
409  // Execute a Lucas Kanade optical flow
410  result->tracked_cnt = result->corner_cnt;
411  uint8_t keep_bad_points = 0;
412  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, opticflow->fast9_ret_corners,
413  &result->tracked_cnt,
414  opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
415  opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level, keep_bad_points);
416 
417 
418  if (opticflow->track_back) {
419  // TODO: Watch out!
420  // We track the flow back and give badly back-tracked vectors a high error,
421  // but we do not yet remove these vectors, nor use the errors in any other function than showing the flow.
422 
423  // initialize corners at the tracked positions:
424  for (int i = 0; i < result->tracked_cnt; i++) {
425  opticflow->fast9_ret_corners[i].x = (uint32_t)(vectors[i].pos.x + vectors[i].flow_x) / opticflow->subpixel_factor;
426  opticflow->fast9_ret_corners[i].y = (uint32_t)(vectors[i].pos.y + vectors[i].flow_y) / opticflow->subpixel_factor;
427  }
428 
429  // present the images in the opposite order:
430  keep_bad_points = 1;
431  uint16_t back_track_cnt = result->tracked_cnt;
432  struct flow_t *back_vectors = opticFlowLK(&opticflow->prev_img_gray, &opticflow->img_gray, opticflow->fast9_ret_corners,
433  &back_track_cnt,
434  opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
435  opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level, keep_bad_points);
436 
437  // printf("Tracked %d points back.\n", back_track_cnt);
438  int32_t back_x, back_y, diff_x, diff_y, dist_squared;
439  int32_t back_track_threshold = 200;
440 
441  for (int i = 0; i < result->tracked_cnt; i++) {
442  if (back_vectors[i].error < LARGE_FLOW_ERROR) {
443  back_x = (int32_t)(back_vectors[i].pos.x + back_vectors[i].flow_x);
444  back_y = (int32_t)(back_vectors[i].pos.y + back_vectors[i].flow_y);
445  diff_x = back_x - vectors[i].pos.x;
446  diff_y = back_y - vectors[i].pos.y;
447  dist_squared = diff_x * diff_x + diff_y * diff_y;
448  // printf("Vector %d: x,y = %d, %d, back x, y = %d, %d, back tracking error %d\n", i, vectors[i].pos.x, vectors[i].pos.y, back_x, back_y, dist_squared);
449  if (dist_squared > back_track_threshold) {
450  vectors[i].error = LARGE_FLOW_ERROR;
451  }
452  } else {
453  vectors[i].error = LARGE_FLOW_ERROR;
454  }
455  }
456 
457  free(back_vectors);
458  }
459 
460  if (opticflow->show_flow) {
461  uint8_t color[4] = {0, 0, 0, 0};
462  uint8_t bad_color[4] = {0, 0, 0, 0};
463  image_show_flow_color(img, vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
464  }
465 
466  static int n_samples = 100;
467  // Estimate size divergence:
468  if (SIZE_DIV) {
469  result->div_size = get_size_divergence(vectors, result->tracked_cnt, n_samples);// * result->fps;
470  } else {
471  result->div_size = 0.0f;
472  }
473 
474  if (LINEAR_FIT) {
475  // Linear flow fit (normally derotation should be performed before):
476  error_threshold = 10.0f;
477  n_iterations_RANSAC = 20;
478  n_samples_RANSAC = 5;
479  success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
480  n_samples_RANSAC, img->w, img->h, &fit_info);
481 
482  if (!success_fit) {
483  fit_info.divergence = 0.0f;
484  fit_info.surface_roughness = 0.0f;
485  }
486 
487  result->divergence = fit_info.divergence;
488  result->surface_roughness = fit_info.surface_roughness;
489  } else {
490  result->divergence = 0.0f;
491  result->surface_roughness = 0.0f;
492  }
493 
494  // Get the median flow
495  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
496  if (result->tracked_cnt == 0) {
497  // We got no flow
498  result->flow_x = 0;
499  result->flow_y = 0;
500 
501  free(vectors);
502  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
503  return false;
504  } else if (result->tracked_cnt % 2) {
505  // Take the median point
506  result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
507  result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
508  } else {
509  // Take the average of the 2 median points
510  result->flow_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
511  result->flow_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
512  }
513 
514  // TODO scale flow to rad/s here
515 
516  // ***************
517  // Flow Derotation
518  // ***************
519 
520  float diff_flow_x = 0.f;
521  float diff_flow_y = 0.f;
522 
523  if (opticflow->derotation && result->tracked_cnt > 5) {
524 
525  float rotation_threshold = M_PI / 180.0f;
526  if (fabs(opticflow->img_gray.eulers.phi - opticflow->prev_img_gray.eulers.phi) > rotation_threshold
527  || fabs(opticflow->img_gray.eulers.theta - opticflow->prev_img_gray.eulers.theta) > rotation_threshold) {
528 
529  // do not apply the derotation if the rotation rates are too high:
530  result->flow_der_x = 0.0f;
531  result->flow_der_y = 0.0f;
532 
533  } else {
534 
535  // determine the roll, pitch and yaw differences between the images.
536  float phi_diff = opticflow->img_gray.eulers.phi - opticflow->prev_img_gray.eulers.phi;
537  float theta_diff = opticflow->img_gray.eulers.theta - opticflow->prev_img_gray.eulers.theta;
538  float psi_diff = opticflow->img_gray.eulers.psi - opticflow->prev_img_gray.eulers.psi;
539 
540  if (strcmp(OPTICFLOW_CAMERA.dev_name, "/dev/video0") == 0) {
541 
542  // bottom cam: just subtract a scaled version of the roll and pitch difference from the global flow vector:
543  diff_flow_x = phi_diff * OPTICFLOW_CAMERA.camera_intrinsics.focal_x; // phi_diff works better than (cam_state->rates.p)
544  diff_flow_y = theta_diff * OPTICFLOW_CAMERA.camera_intrinsics.focal_y;
545  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor *
546  opticflow->derotation_correction_factor_x;
547  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor *
548  opticflow->derotation_correction_factor_y;
549  } else {
550 
551  // frontal cam, predict individual flow vectors:
552  struct flow_t *predicted_flow_vectors = predict_flow_vectors(vectors, result->tracked_cnt, phi_diff, theta_diff,
553  psi_diff, opticflow);
554  if (opticflow->show_flow) {
555  uint8_t color[4] = {255, 255, 255, 255};
556  uint8_t bad_color[4] = {255, 255, 255, 255};
557  image_show_flow_color(img, predicted_flow_vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
558  }
559 
560  for (int i = 0; i < result->tracked_cnt; i++) {
561  // subtract the flow:
562  vectors[i].flow_x -= predicted_flow_vectors[i].flow_x;
563  vectors[i].flow_y -= predicted_flow_vectors[i].flow_y;
564  }
565 
566  // vectors have to be re-sorted after derotation:
567  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
568 
569  if (result->tracked_cnt % 2) {
570  // Take the median point
571  result->flow_der_x = vectors[result->tracked_cnt / 2].flow_x;
572  result->flow_der_y = vectors[result->tracked_cnt / 2].flow_y;
573  } else {
574  // Take the average of the 2 median points
575  result->flow_der_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
576  result->flow_der_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
577  }
578  }
579  }
580  }
581 
582  // Velocity calculation
583  // Right now this formula is under the assumption that the flow only exists along the center axis of the camera.
584  // TODO: Calculate the velocity in a more sophisticated way, taking into account the drone's angle and the slope of the ground plane.
585  // TODO: This is actually only correct for the bottom camera:
586  result->vel_cam.x = (float)result->flow_der_x * result->fps * agl_dist_value_filtered /
587  (opticflow->subpixel_factor * OPTICFLOW_CAMERA.camera_intrinsics.focal_x);
588  result->vel_cam.y = (float)result->flow_der_y * result->fps * agl_dist_value_filtered /
589  (opticflow->subpixel_factor * OPTICFLOW_CAMERA.camera_intrinsics.focal_y);
590  result->vel_cam.z = result->divergence * result->fps * agl_dist_value_filtered;
591 
592  //Apply a median filter to the velocity if wanted
593  if (opticflow->median_filter == true) {
594  UpdateMedianFilterVect3Float(vel_filt, result->vel_cam);
595  }
596 
597  // Determine quality of noise measurement for state filter
598  //TODO develop a noise model based on groundtruth
599  //result->noise_measurement = 1 - (float)result->tracked_cnt / ((float)opticflow->max_track_corners * 1.25f);
600  result->noise_measurement = 0.25;
601 
602  // *************************************************************************************
603  // Next Loop Preparation
604  // *************************************************************************************
605  if (opticflow->feature_management) {
606  result->corner_cnt = result->tracked_cnt;
607  //get the new positions of the corners and the "residual" subpixel positions
608  for (uint16_t i = 0; i < result->tracked_cnt; i++) {
609  opticflow->fast9_ret_corners[i].x = (uint32_t)((vectors[i].pos.x + (float)vectors[i].flow_x) /
610  opticflow->subpixel_factor);
611  opticflow->fast9_ret_corners[i].y = (uint32_t)((vectors[i].pos.y + (float)vectors[i].flow_y) /
612  opticflow->subpixel_factor);
613  opticflow->fast9_ret_corners[i].x_sub = (uint16_t)((vectors[i].pos.x + vectors[i].flow_x) % opticflow->subpixel_factor);
614  opticflow->fast9_ret_corners[i].y_sub = (uint16_t)((vectors[i].pos.y + vectors[i].flow_y) % opticflow->subpixel_factor);
615  opticflow->fast9_ret_corners[i].count = vectors[i].pos.count;
616  }
617  }
618  free(vectors);
619  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
620 
621  return true;
622 }
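The velocity step near the end of calc_fast9_lukas_kanade scales the derotated, subpixel image flow by the frame rate and the filtered height above ground, and divides by the subpixel factor and the focal length. A standalone numeric sketch of that scaling with made-up values (the variable names are placeholders, not the module's fields):

#include <stdio.h>

int main(void)
{
  float flow_der_x = 35.0f;      /* derotated flow, subpixels per frame */
  float fps = 30.0f;             /* frame rate of the flow computation */
  float agl = 1.5f;              /* filtered height above ground, meters */
  float subpixel_factor = 10.0f; /* subpixels per pixel */
  float focal_x = 347.0f;        /* focal length in pixels, example value */

  /* Same shape as the module's formula:
   * vel = flow * fps * agl / (subpixel_factor * focal) */
  float vel_x = flow_der_x * fps * agl / (subpixel_factor * focal_x);
  printf("camera-frame velocity x: %.3f m/s\n", vel_x);
  return 0;
}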
623 
624 /*
625  * Predict flow vectors by means of the rotation rates:
626  */
627 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
628  float theta_diff, float psi_diff, struct opticflow_t *opticflow)
629 {
630 
631  // reserve memory for the predicted flow vectors:
632  struct flow_t *predicted_flow_vectors = malloc(sizeof(struct flow_t) * n_points);
633 
634  float K[9] = {OPTICFLOW_CAMERA.camera_intrinsics.focal_x, 0.0f, OPTICFLOW_CAMERA.camera_intrinsics.center_x,
635  0.0f, OPTICFLOW_CAMERA.camera_intrinsics.focal_y, OPTICFLOW_CAMERA.camera_intrinsics.center_y,
636  0.0f, 0.0f, 1.0f
637  };
638  // TODO: make an option to not do distortion / undistortion (Dhane_k = 1)
639  float k = OPTICFLOW_CAMERA.camera_intrinsics.Dhane_k;
640 
641  float A, B, C; // as in Longuet-Higgins
642 
643  if (strcmp(OPTICFLOW_CAMERA.dev_name, "/dev/video1") == 0) {
644  // specific for the x,y swapped Bebop 2 images:
645  A = -psi_diff;
646  B = theta_diff;
647  C = phi_diff;
648  } else {
649  A = theta_diff;
650  B = phi_diff;
651  C = psi_diff;
652  }
653 
654  float x_n, y_n;
655  float x_n_new, y_n_new, x_pix_new, y_pix_new;
656  float predicted_flow_x, predicted_flow_y;
657  for (uint16_t i = 0; i < n_points; i++) {
658  // the from-coordinate is always the same:
659  predicted_flow_vectors[i].pos.x = flow_vectors[i].pos.x;
660  predicted_flow_vectors[i].pos.y = flow_vectors[i].pos.y;
661 
662  bool success = distorted_pixels_to_normalized_coords((float)flow_vectors[i].pos.x / opticflow->subpixel_factor,
663  (float)flow_vectors[i].pos.y / opticflow->subpixel_factor, &x_n, &y_n, k, K);
664  if (success) {
665  // predict flow as in a linear pinhole camera model:
666  predicted_flow_x = A * x_n * y_n - B * x_n * x_n - B + C * y_n;
667  predicted_flow_y = -C * x_n + A + A * y_n * y_n - B * x_n * y_n;
668 
669  x_n_new = x_n + predicted_flow_x;
670  y_n_new = y_n + predicted_flow_y;
671 
672  success = normalized_coords_to_distorted_pixels(x_n_new, y_n_new, &x_pix_new, &y_pix_new, k, K);
673 
674  if (success) {
675  predicted_flow_vectors[i].flow_x = (int16_t)(x_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.x);
676  predicted_flow_vectors[i].flow_y = (int16_t)(y_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.y);
677  predicted_flow_vectors[i].error = 0;
678  } else {
679  predicted_flow_vectors[i].flow_x = 0;
680  predicted_flow_vectors[i].flow_y = 0;
681  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
682  }
683  } else {
684  predicted_flow_vectors[i].flow_x = 0;
685  predicted_flow_vectors[i].flow_y = 0;
686  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
687  }
688  }
689  return predicted_flow_vectors;
690 }
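predict_flow_vectors uses the rotational part of the Longuet-Higgins flow equations in normalized camera coordinates: given the small inter-frame rotations A, B and C, it predicts how much each point moves purely because of the camera rotation. A self-contained sketch of that prediction for a single normalized point, using the same expressions as above (the demo values are arbitrary):

#include <stdio.h>

/* Rotational optical flow at a normalized image point (x_n, y_n),
 * for small rotations A, B, C between two frames
 * (same expressions as used in predict_flow_vectors). */
static void rotational_flow(float x_n, float y_n, float A, float B, float C,
                            float *du, float *dv)
{
  *du =  A * x_n * y_n - B * x_n * x_n - B + C * y_n;
  *dv = -C * x_n + A + A * y_n * y_n - B * x_n * y_n;
}

int main(void)
{
  float du, dv;
  /* About 1 degree of rotation on A, none on B and C, point off-center. */
  rotational_flow(0.2f, -0.1f, 0.01745f, 0.0f, 0.0f, &du, &dv);
  printf("predicted normalized flow: (%f, %f)\n", du, dv);
  return 0;
}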
691 
692 
693 /* manage_flow_features - Update list of corners to be tracked by LK
694  * Remembers previous points and tries to find new points in less dense
695  * areas of the image first.
696  *
697  */
698 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow, struct opticflow_result_t *result)
699 {
700  // first check if corners have not moved too close together due to flow:
701  int16_t c1 = 0;
702  while (c1 < (int16_t)result->corner_cnt - 1) {
703  bool exists = false;
704  for (int16_t i = c1 + 1; i < result->corner_cnt; i++) {
705  if (abs((int16_t)opticflow->fast9_ret_corners[c1].x - (int16_t)opticflow->fast9_ret_corners[i].x) <
706  opticflow->fast9_min_distance / 2
707  && abs((int16_t)opticflow->fast9_ret_corners[c1].y - (int16_t)opticflow->fast9_ret_corners[i].y) <
708  opticflow->fast9_min_distance / 2) {
709  // if too close, replace the corner with the last one in the list:
710  opticflow->fast9_ret_corners[c1].x = opticflow->fast9_ret_corners[result->corner_cnt - 1].x;
711  opticflow->fast9_ret_corners[c1].y = opticflow->fast9_ret_corners[result->corner_cnt - 1].y;
712  opticflow->fast9_ret_corners[c1].count = opticflow->fast9_ret_corners[result->corner_cnt - 1].count;
713  opticflow->fast9_ret_corners[c1].x_sub = opticflow->fast9_ret_corners[result->corner_cnt - 1].x_sub;
714  opticflow->fast9_ret_corners[c1].y_sub = opticflow->fast9_ret_corners[result->corner_cnt - 1].y_sub;
715 
716  // decrease the number of corners:
717  result->corner_cnt--;
718  exists = true;
719  // no further checking required for the removed corner
720  break;
721  }
722  }
723  // if the corner has been replaced, the new corner in position c1 has to be checked again:
724  if (!exists) { c1++; }
725  }
726 
727  // no need for "per region" re-detection when there are no previous corners
728  if ((!opticflow->fast9_region_detect) || (result->corner_cnt == 0)) {
729  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
730  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
731  &opticflow->fast9_rsize,
732  &opticflow->fast9_ret_corners,
733  NULL);
734  } else {
735  // allocating memory and initializing the 2d array that holds the number of corners per region and its index (for the sorting)
736  uint16_t **region_count = calloc(opticflow->fast9_num_regions, sizeof(uint16_t *));
737  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
738  region_count[i] = calloc(2, sizeof(uint16_t));
739  region_count[i][0] = 0;
740  region_count[i][1] = i;
741  }
742  uint16_t root_regions = (uint16_t)sqrtf((float)opticflow->fast9_num_regions);
743  int region_index;
744  for (uint16_t i = 0; i < result->corner_cnt; i++) {
745  region_index = (opticflow->fast9_ret_corners[i].x * root_regions / img->w
746  + root_regions * (opticflow->fast9_ret_corners[i].y * root_regions / img->h));
747  region_index = (region_index < opticflow->fast9_num_regions) ? region_index : opticflow->fast9_num_regions - 1;
748  region_count[region_index][0]++;
749  }
750 
751  //sorting region_count array according to first column (number of corners).
752  qsort(region_count, opticflow->fast9_num_regions, sizeof(region_count[0]), cmp_array);
753 
754  uint16_t roi[4];
755  // Detecting corners from the region with the fewest corners to the one with the most, until a desired total is reached.
756  for (uint16_t i = 0; i < opticflow->fast9_num_regions && result->corner_cnt < 2 * opticflow->max_track_corners; i++) {
757  // Find the boundaries of the region of interest
758  roi[0] = (region_count[i][1] % root_regions) * (img->w / root_regions);
759  roi[1] = (region_count[i][1] / root_regions) * (img->h / root_regions);
760  roi[2] = roi[0] + (img->w / root_regions);
761  roi[3] = roi[1] + (img->h / root_regions);
762 
763  struct point_t *new_corners = calloc(opticflow->fast9_rsize, sizeof(struct point_t));
764  uint16_t new_count = 0;
765 
766  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
767  opticflow->fast9_padding, opticflow->fast9_padding, &new_count,
768  &opticflow->fast9_rsize, &new_corners, roi);
769 
770  // check that no identified points already exist in list
771  for (uint16_t j = 0; j < new_count; j++) {
772  bool exists = false;
773  for (uint16_t k = 0; k < result->corner_cnt; k++) {
774  if (abs((int16_t)new_corners[j].x - (int16_t)opticflow->fast9_ret_corners[k].x) < (int16_t)opticflow->fast9_min_distance
775  && abs((int16_t)new_corners[j].y - (int16_t)opticflow->fast9_ret_corners[k].y) < (int16_t)
776  opticflow->fast9_min_distance) {
777  exists = true;
778  break;
779  }
780  }
781  if (!exists) {
782  opticflow->fast9_ret_corners[result->corner_cnt].x = new_corners[j].x;
783  opticflow->fast9_ret_corners[result->corner_cnt].y = new_corners[j].y;
784  opticflow->fast9_ret_corners[result->corner_cnt].count = 0;
785  opticflow->fast9_ret_corners[result->corner_cnt].x_sub = 0;
786  opticflow->fast9_ret_corners[result->corner_cnt].y_sub = 0;
787  result->corner_cnt++;
788 
789  if (result->corner_cnt >= opticflow->fast9_rsize) {
790  break;
791  }
792  }
793  }
794 
795  free(new_corners);
796  }
797  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
798  free(region_count[i]);
799  }
800  free(region_count);
801  }
802 }
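The region-based branch of manage_flow_features counts existing corners per image region, sorts the regions so the emptiest come first, and re-detects there before the crowded ones. A standalone sketch of that binning-and-sorting step on made-up coordinates, with a flat struct array instead of the module's uint16_t ** table:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct region { uint16_t count; uint16_t index; };

static int cmp_region(const void *a, const void *b)
{
  return ((const struct region *)a)->count - ((const struct region *)b)->count;
}

int main(void)
{
  /* Illustrative values: a 240x240 image split into 3x3 regions. */
  enum { ROOT_REGIONS = 3, NUM_REGIONS = ROOT_REGIONS * ROOT_REGIONS };
  const uint16_t img_w = 240, img_h = 240;
  uint16_t corners_x[] = {10, 20, 30, 200, 210, 230, 120};
  uint16_t corners_y[] = {10, 15, 25, 220, 230, 235, 120};
  const uint16_t n_corners = 7;

  struct region regions[NUM_REGIONS];
  for (uint16_t i = 0; i < NUM_REGIONS; i++) { regions[i].count = 0; regions[i].index = i; }

  /* Same binning rule as manage_flow_features: column + ROOT_REGIONS * row. */
  for (uint16_t i = 0; i < n_corners; i++) {
    uint16_t idx = corners_x[i] * ROOT_REGIONS / img_w
                   + ROOT_REGIONS * (corners_y[i] * ROOT_REGIONS / img_h);
    if (idx >= NUM_REGIONS) { idx = NUM_REGIONS - 1; }
    regions[idx].count++;
  }

  /* Emptiest regions come first, so re-detection would start there. */
  qsort(regions, NUM_REGIONS, sizeof(struct region), cmp_region);
  for (uint16_t i = 0; i < NUM_REGIONS; i++) {
    printf("region %u has %u corners\n", regions[i].index, regions[i].count);
  }
  return 0;
}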
803 
812 bool calc_edgeflow_tot(struct opticflow_t *opticflow, struct image_t *img,
813  struct opticflow_result_t *result)
814 {
815  // Define Static Variables
816  static struct edge_hist_t edge_hist[MAX_HORIZON];
817  static uint8_t current_frame_nr = 0;
818  struct edge_flow_t edgeflow;
819  static uint8_t previous_frame_offset[2] = {1, 1};
820 
821  // Define Normal variables
822  struct edgeflow_displacement_t displacement;
823  displacement.x = calloc(img->w, sizeof(int32_t));
824  displacement.y = calloc(img->h, sizeof(int32_t));
825 
826  // If the method just switched to this one, reinitialize the
827  // array of edge_hist structures.
828  if (opticflow->just_switched_method == 1 && edge_hist[0].x == NULL) {
829  int i;
830  for (i = 0; i < MAX_HORIZON; i++) {
831  edge_hist[i].x = calloc(img->w, sizeof(int32_t));
832  edge_hist[i].y = calloc(img->h, sizeof(int32_t));
833  FLOAT_EULERS_ZERO(edge_hist[i].eulers);
834  }
835  }
836 
837  uint16_t disp_range;
838  if (opticflow->search_distance < DISP_RANGE_MAX) {
839  disp_range = opticflow->search_distance;
840  } else {
841  disp_range = DISP_RANGE_MAX;
842  }
843 
844  uint16_t window_size;
845 
846  if (opticflow->window_size < MAX_WINDOW_SIZE) {
847  window_size = opticflow->window_size;
848  } else {
849  window_size = MAX_WINDOW_SIZE;
850  }
851 
852  uint16_t RES = opticflow->resolution_factor;
853 
854  //......................Calculating EdgeFlow..................... //
855 
856  // Calculate current frame's edge histogram
857  int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
858  int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
859  calculate_edge_histogram(img, edge_hist_x, 'x', 0);
860  calculate_edge_histogram(img, edge_hist_y, 'y', 0);
861 
862 
863  // Copy frame time and angles of image to calculated edge histogram
864  edge_hist[current_frame_nr].frame_time = img->ts;
865  edge_hist[current_frame_nr].eulers = img->eulers;
866 
867  // Calculate which previous edge_hist to compare with the current
868  uint8_t previous_frame_nr[2];
869  calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);
870 
871  //Select edge histogram from the previous frame nr
872  int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
873  int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;
874 
875  //Calculate the corresponding derotation of the two frames
876  int16_t der_shift_x = 0;
877  int16_t der_shift_y = 0;
878 
879  if (opticflow->derotation) {
880  der_shift_x = (int16_t)((edge_hist[current_frame_nr].eulers.phi - edge_hist[previous_frame_nr[0]].eulers.phi) *
881  OPTICFLOW_CAMERA.camera_intrinsics.focal_x * opticflow->derotation_correction_factor_x);
882  der_shift_y = (int16_t)((edge_hist[current_frame_nr].eulers.theta - edge_hist[previous_frame_nr[1]].eulers.theta) *
883  OPTICFLOW_CAMERA.camera_intrinsics.focal_y * opticflow->derotation_correction_factor_y);
884  }
885 
886  // Estimate pixel wise displacement of the edge histograms for x and y direction
887  calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
888  displacement.x, img->w,
889  window_size, disp_range, der_shift_x);
890  calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
891  displacement.y, img->h,
892  window_size, disp_range, der_shift_y);
893 
894  // Fit a line on the pixel displacement to estimate
895  // the global pixel flow and divergence (RES is resolution)
896  line_fit(displacement.x, &edgeflow.div_x,
897  &edgeflow.flow_x, img->w,
898  window_size + disp_range, RES);
899  line_fit(displacement.y, &edgeflow.div_y,
900  &edgeflow.flow_y, img->h,
901  window_size + disp_range, RES);
902 
903  /* Save the resulting flow in results
904  * Warning: The flow detected here is different in sign
905  * and size, therefore it is divided by
906  * the same subpixel factor and multiplied by -1 to make it
907  * on par with the LK algorithm in opticflow_calculator.c
908  */
909  edgeflow.flow_x = -1 * edgeflow.flow_x;
910  edgeflow.flow_y = -1 * edgeflow.flow_y;
911 
912  edgeflow.flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
913  edgeflow.flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];
914 
915  result->flow_x = (int16_t)edgeflow.flow_x / RES;
916  result->flow_y = (int16_t)edgeflow.flow_y / RES;
917 
918  //Fill up the results optic flow to be on par with LK_fast9
919  result->flow_der_x = result->flow_x;
920  result->flow_der_y = result->flow_y;
921  result->corner_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
922  result->tracked_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
923  result->divergence = -1.0 * (float)edgeflow.div_x /
924  RES; // Also multiply the divergence with -1.0 to make it on par with the LK algorithm
925  result->div_size =
926  result->divergence; // Fill div_size with the divergence to at least get some divergence measurement when switching from LK to EF
927  result->surface_roughness = 0.0f;
928 
929  //......................Calculating VELOCITY ..................... //
930 
931  /* Estimate fps per direction
932  * This is the fps with adaptive horizon for subpixel flow, which is not the same
933  * as the loop speed of the algorithm. The faster the quadcopter flies,
934  * the higher it becomes.
935  */
936  float fps_x = 0;
937  float fps_y = 0;
938  float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
939  float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
940  fps_x = 1 / (time_diff_x);
941  fps_y = 1 / (time_diff_y);
942 
943  result->fps = fps_x;
944 
945  // TODO scale flow to rad/s here
946 
947  // Calculate velocity
948  result->vel_cam.x = edgeflow.flow_x * fps_x * agl_dist_value_filtered * OPTICFLOW_CAMERA.camera_intrinsics.focal_x /
949  RES;
950  result->vel_cam.y = edgeflow.flow_y * fps_y * agl_dist_value_filtered * OPTICFLOW_CAMERA.camera_intrinsics.focal_y /
951  RES;
952  result->vel_cam.z = result->divergence * fps_x * agl_dist_value_filtered;
953 
954  //Apply a median filter to the velocity if wanted
955  if (opticflow->median_filter == true) {
956  UpdateMedianFilterVect3Float(vel_filt, result->vel_cam);
957  }
958 
959  result->noise_measurement = 0.2;
960 
961 #if OPTICFLOW_SHOW_FLOW
962  draw_edgeflow_img(img, edgeflow, prev_edge_histogram_x, edge_hist_x);
963 #endif
964  // Increment and wrap current time frame
965  current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
966 
967  // Free alloc'd variables
968  free(displacement.x);
969  free(displacement.y);
970 
971  return true;
972 }
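calc_edgeflow_tot never tracks individual points: it compresses each frame into 1-D edge histograms (one bin per column and per row, filled with accumulated gradient magnitude) and then measures how those histograms shift between frames. A rough standalone sketch of building such a column histogram from a grayscale buffer; this shows only the idea, not the library's calculate_edge_histogram:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Sum of absolute horizontal gradients per column of a grayscale image. */
static void column_edge_histogram(const uint8_t *gray, uint16_t w, uint16_t h,
                                  int32_t *hist)
{
  for (uint16_t x = 0; x < w; x++) { hist[x] = 0; }
  for (uint16_t y = 0; y < h; y++) {
    for (uint16_t x = 1; x + 1 < w; x++) {
      int32_t grad = (int32_t)gray[y * w + x + 1] - (int32_t)gray[y * w + x - 1];
      hist[x] += abs(grad);
    }
  }
}

int main(void)
{
  /* Tiny 4x3 test image with a vertical edge between columns 1 and 2. */
  uint8_t img[12] = {
    0, 0, 255, 255,
    0, 0, 255, 255,
    0, 0, 255, 255
  };
  int32_t hist[4];
  column_edge_histogram(img, 4, 3, hist);
  for (int x = 0; x < 4; x++) { printf("col %d: %d\n", x, hist[x]); }
  return 0;
}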
973 
974 
982 bool opticflow_calc_frame(struct opticflow_t *opticflow, struct image_t *img,
983  struct opticflow_result_t *result)
984 {
985  bool flow_successful = false;
986  // A switch counter that checks in the loop if the current method is the same
987  // as the previous (for reinitializing structs)
988  static int8_t switch_counter = -1;
989  if (switch_counter != opticflow->method) {
990  opticflow->just_switched_method = true;
991  switch_counter = opticflow->method;
992  // Clear the static result
993  memset(result, 0, sizeof(struct opticflow_result_t));
994  } else {
995  opticflow->just_switched_method = false;
996  }
997 
998  // Switch between methods (0 = fast9/lukas-kanade, 1 = EdgeFlow)
999  if (opticflow->method == 0) {
1000  flow_successful = calc_fast9_lukas_kanade(opticflow, img, result);
1001  } else if (opticflow->method == 1) {
1002  flow_successful = calc_edgeflow_tot(opticflow, img, result);
1003  }
1004 
1005  /* Rotate velocities from camera frame coordinates to body coordinates for control
1006  * IMPORTANT!!! This frame to body orientation should be the case for the Parrot
1007  * ARdrone and Bebop, however this can be different for other quadcopters
1008  * ALWAYS double check!
1009  */
1010  float_rmat_transp_vmult(&result->vel_body, &body_to_cam, &result->vel_cam);
1011 
1012  return flow_successful;
1013 }
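The last step of opticflow_calc_frame rotates the camera-frame velocity into the body frame by multiplying with the transpose of the body-to-camera rotation matrix. A standalone numeric sketch of that operation for the pure -90 degree yaw offset used as the ARdrone/Bebop default above; the matrix convention here is illustrative, and plain 3x3 math replaces the Paparazzi algebra helpers:

#include <math.h>
#include <stdio.h>

/* v_body = R^T * v_cam: multiply by the transpose of a 3x3 matrix. */
static void transp_mult(const float R[3][3], const float v[3], float out[3])
{
  for (int i = 0; i < 3; i++) {
    out[i] = R[0][i] * v[0] + R[1][i] * v[1] + R[2][i] * v[2];
  }
}

int main(void)
{
  float psi = -1.5707963f;  /* -pi/2, matching OPTICFLOW_BODY_TO_CAM_PSI */
  /* Rotation about the z axis by psi (roll and pitch offsets are zero by default). */
  float R[3][3] = {
    { cosf(psi), sinf(psi), 0.0f},
    {-sinf(psi), cosf(psi), 0.0f},
    {      0.0f,      0.0f, 1.0f}
  };
  float v_cam[3] = {0.5f, 0.0f, 0.0f};  /* 0.5 m/s along the camera x axis */
  float v_body[3];

  transp_mult(R, v_cam, v_body);
  printf("body-frame velocity: %.3f %.3f %.3f\n", v_body[0], v_body[1], v_body[2]);
  return 0;
}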
1014 
1021 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
1022 {
1023  uint32_t msec;
1024  msec = (finishtime->tv_sec - starttime->tv_sec) * 1000;
1025  msec += (finishtime->tv_usec - starttime->tv_usec) / 1000;
1026  return msec;
1027 }
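timeval_diff returns the elapsed time in milliseconds, which is why the FPS computation earlier in the file uses 1000.f / dt. A small standalone usage sketch with POSIX gettimeofday; the helper below mirrors the function above but is not the module's code:

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

/* Elapsed milliseconds between two timevals, like timeval_diff above. */
static long diff_msec(const struct timeval *start, const struct timeval *finish)
{
  return (finish->tv_sec - start->tv_sec) * 1000L
         + (finish->tv_usec - start->tv_usec) / 1000L;
}

int main(void)
{
  struct timeval t0, t1;
  gettimeofday(&t0, NULL);
  usleep(40000);                     /* pretend one 25 Hz frame went by */
  gettimeofday(&t1, NULL);

  long dt = diff_msec(&t0, &t1);     /* milliseconds */
  if (dt > 0) {
    printf("dt = %ld ms, fps = %.1f\n", dt, 1000.0f / (float)dt);
  }
  return 0;
}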
1028 
1036 static int cmp_flow(const void *a, const void *b)
1037 {
1038  const struct flow_t *a_p = (const struct flow_t *)a;
1039  const struct flow_t *b_p = (const struct flow_t *)b;
1040  return (a_p->flow_x * a_p->flow_x + a_p->flow_y * a_p->flow_y) - (b_p->flow_x * b_p->flow_x + b_p->flow_y *
1041  b_p->flow_y);
1042 }
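cmp_flow orders flow vectors by squared magnitude so that calc_fast9_lukas_kanade can take the median vector as a robust global flow estimate; a single large outlier then has no influence. A self-contained illustration of that sort-then-take-median step on a small made-up set of vectors:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_flow { int16_t flow_x, flow_y; };

/* Same ordering criterion as cmp_flow: squared flow magnitude. */
static int cmp_demo_flow(const void *a, const void *b)
{
  const struct demo_flow *fa = (const struct demo_flow *)a;
  const struct demo_flow *fb = (const struct demo_flow *)b;
  return (fa->flow_x * fa->flow_x + fa->flow_y * fa->flow_y)
         - (fb->flow_x * fb->flow_x + fb->flow_y * fb->flow_y);
}

int main(void)
{
  struct demo_flow v[] = {{8, 1}, {2, 2}, {40, -3}, {3, 1}, {2, -1}};
  const int n = 5;

  qsort(v, n, sizeof(struct demo_flow), cmp_demo_flow);

  /* Odd count: the middle element is the median; the outlier {40, -3} is ignored. */
  printf("median flow: (%d, %d)\n", v[n / 2].flow_x, v[n / 2].flow_y);
  return 0;
}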
1043 
1051 static int cmp_array(const void *a, const void *b)
1052 {
1053  const uint16_t *pa = *(const uint16_t **)a;
1054  const uint16_t *pb = *(const uint16_t **)b;
1055  return pa[0] - pb[0];
1056 }