Paparazzi UAS  v5.15_devel-230-gc96ce27
Paparazzi is a free software Unmanned Aircraft System.
opticflow_calculator.c
1 /*
2  * Copyright (C) 2014 Hann Woei Ho
3  * 2015 Freek van Tienen <freek.v.tienen@gmail.com>
4  * 2016 Kimberly McGuire <k.n.mcguire@tudelft.nl>
5  *
6  * This file is part of Paparazzi.
7  *
8  * Paparazzi is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * Paparazzi is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with Paparazzi; see the file COPYING. If not, see
20  * <http://www.gnu.org/licenses/>.
21  */
22 
29 #include "std.h"
30 
31 #include <stdio.h>
32 #include <string.h>
33 #include <stdlib.h>
34 
35 // Own Header
36 #include "opticflow_calculator.h"
37 
38 // Computer Vision
39 #include "lib/vision/image.h"
41 #include "lib/vision/fast_rosten.h"
42 #include "lib/vision/act_fast.h"
43 #include "lib/vision/edge_flow.h"
45 #include "size_divergence.h"
46 #include "linear_flow_fit.h"
47 #include "modules/sonar/agl_dist.h"
48 
49 // to get the definition of front_camera / bottom_camera
50 #include BOARD_CONFIG
51 
52 // whether to show the flow and corners:
53 #define OPTICFLOW_SHOW_CORNERS 0
54 
55 #define EXHAUSTIVE_FAST 0
56 #define ACT_FAST 1
57 // TODO: these are now adapted, but perhaps later could be a setting:
60 
61 // What methods are run to determine divergence, lateral flow, etc.
62 // SIZE_DIV looks at line sizes and only calculates divergence
63 #define SIZE_DIV 1
64 // LINEAR_FIT makes a linear optical flow field fit and extracts a lot of information:
65 // relative velocities in x, y, z (divergence / time to contact), the slope of the surface, and the surface roughness.
66 #define LINEAR_FIT 1
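// For intuition (approximate, assuming a downward-looking camera moving vertically over a planar
// surface at distance h with speed v_z): the divergence, once scaled by the frame rate, behaves
// like v_z / h, so the time to contact is roughly tau = h / v_z = 1 / (divergence * fps).
// This is also why the divergence is later multiplied by fps and the filtered AGL distance to
// obtain the vertical velocity estimate vel_cam.z.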
67 
68 #ifndef OPTICFLOW_CORNER_METHOD
69 #define OPTICFLOW_CORNER_METHOD ACT_FAST
70 #endif
71 PRINT_CONFIG_VAR(OPTICFLOW_CORNER_METHOD)
72 
73 /* Set the default values */
74 #ifndef OPTICFLOW_MAX_TRACK_CORNERS
75 #define OPTICFLOW_MAX_TRACK_CORNERS 25
76 #endif
77 PRINT_CONFIG_VAR(OPTICFLOW_MAX_TRACK_CORNERS)
78 
79 #ifndef OPTICFLOW_WINDOW_SIZE
80 #define OPTICFLOW_WINDOW_SIZE 10
81 #endif
82 PRINT_CONFIG_VAR(OPTICFLOW_WINDOW_SIZE)
83 
84 #ifndef OPTICFLOW_SEARCH_DISTANCE
85 #define OPTICFLOW_SEARCH_DISTANCE 20
86 #endif
87 PRINT_CONFIG_VAR(OPTICFLOW_SEARCH_DISTANCE)
88 
89 #ifndef OPTICFLOW_SUBPIXEL_FACTOR
90 #define OPTICFLOW_SUBPIXEL_FACTOR 10
91 #endif
92 PRINT_CONFIG_VAR(OPTICFLOW_SUBPIXEL_FACTOR)
93 
94 #ifndef OPTICFLOW_RESOLUTION_FACTOR
95 #define OPTICFLOW_RESOLUTION_FACTOR 100
96 #endif
97 PRINT_CONFIG_VAR(OPTICFLOW_RESOLUTION_FACTOR)
98 
99 #ifndef OPTICFLOW_MAX_ITERATIONS
100 #define OPTICFLOW_MAX_ITERATIONS 10
101 #endif
102 PRINT_CONFIG_VAR(OPTICFLOW_MAX_ITERATIONS)
103 
104 #ifndef OPTICFLOW_THRESHOLD_VEC
105 #define OPTICFLOW_THRESHOLD_VEC 2
106 #endif
107 PRINT_CONFIG_VAR(OPTICFLOW_THRESHOLD_VEC)
108 
109 #ifndef OPTICFLOW_PYRAMID_LEVEL
110 #define OPTICFLOW_PYRAMID_LEVEL 2
111 #endif
112 PRINT_CONFIG_VAR(OPTICFLOW_PYRAMID_LEVEL)
113 
114 #ifndef OPTICFLOW_FAST9_ADAPTIVE
115 #define OPTICFLOW_FAST9_ADAPTIVE TRUE
116 #endif
117 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_ADAPTIVE)
118 
119 #ifndef OPTICFLOW_FAST9_THRESHOLD
120 #define OPTICFLOW_FAST9_THRESHOLD 20
121 #endif
122 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_THRESHOLD)
123 
124 #ifndef OPTICFLOW_FAST9_MIN_DISTANCE
125 #define OPTICFLOW_FAST9_MIN_DISTANCE 10
126 #endif
127 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_MIN_DISTANCE)
128 
129 #ifndef OPTICFLOW_FAST9_PADDING
130 #define OPTICFLOW_FAST9_PADDING 20
131 #endif
132 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_PADDING)
133 
134 // FAST9 thresholds that are currently not set from the GCS:
135 #define FAST9_LOW_THRESHOLD 5
136 #define FAST9_HIGH_THRESHOLD 60
137 
138 
139 #ifndef OPTICFLOW_METHOD
140 #define OPTICFLOW_METHOD 0
141 #endif
142 PRINT_CONFIG_VAR(OPTICFLOW_METHOD)
143 
144 #if OPTICFLOW_METHOD > 1
145 #error OPTICFLOW_METHOD must be 0 (Lucas-Kanade) or 1 (EdgeFlow)
146 #endif
147 
148 #ifndef OPTICFLOW_DEROTATION
149 #define OPTICFLOW_DEROTATION TRUE
150 #endif
151 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION)
152 
153 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X
154 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X 1.0
155 #endif
156 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X)
157 
158 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y
159 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y 1.0
160 #endif
161 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y)
162 
163 #ifndef OPTICFLOW_MEDIAN_FILTER
164 #define OPTICFLOW_MEDIAN_FILTER FALSE
165 #endif
166 PRINT_CONFIG_VAR(OPTICFLOW_MEDIAN_FILTER)
167 
168 #ifndef OPTICFLOW_FEATURE_MANAGEMENT
169 #define OPTICFLOW_FEATURE_MANAGEMENT 0
170 #endif
171 PRINT_CONFIG_VAR(OPTICFLOW_FEATURE_MANAGEMENT)
172 
173 #ifndef OPTICFLOW_FAST9_REGION_DETECT
174 #define OPTICFLOW_FAST9_REGION_DETECT 1
175 #endif
176 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_REGION_DETECT)
177 
178 #ifndef OPTICFLOW_FAST9_NUM_REGIONS
179 #define OPTICFLOW_FAST9_NUM_REGIONS 9
180 #endif
181 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_NUM_REGIONS)
182 
183 #ifndef OPTICFLOW_ACTFAST_LONG_STEP
184 #define OPTICFLOW_ACTFAST_LONG_STEP 10
185 #endif
186 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_LONG_STEP)
187 
188 #ifndef OPTICFLOW_ACTFAST_SHORT_STEP
189 #define OPTICFLOW_ACTFAST_SHORT_STEP 2
190 #endif
191 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_SHORT_STEP)
192 
193 #ifndef OPTICFLOW_ACTFAST_GRADIENT_METHOD
194 #define OPTICFLOW_ACTFAST_GRADIENT_METHOD 1
195 #endif
196 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_GRADIENT_METHOD)
197 
198 #ifndef OPTICFLOW_ACTFAST_MIN_GRADIENT
199 #define OPTICFLOW_ACTFAST_MIN_GRADIENT 10
200 #endif
201 PRINT_CONFIG_VAR(OPTICFLOW_ACTFAST_MIN_GRADIENT)
202 
203 // Defaults for ARdrone
204 #ifndef OPTICFLOW_BODY_TO_CAM_PHI
205 #define OPTICFLOW_BODY_TO_CAM_PHI 0
206 #endif
207 #ifndef OPTICFLOW_BODY_TO_CAM_THETA
208 #define OPTICFLOW_BODY_TO_CAM_THETA 0
209 #endif
210 #ifndef OPTICFLOW_BODY_TO_CAM_PSI
211 #define OPTICFLOW_BODY_TO_CAM_PSI -M_PI_2
212 #endif
213 
214 // Tracking back flow to make the accepted flow vectors more robust:
215 // Default is false, as it does take extra processing time
216 #ifndef OPTICFLOW_TRACK_BACK
217 #define OPTICFLOW_TRACK_BACK FALSE
218 #endif
219 PRINT_CONFIG_VAR(OPTICFLOW_TRACK_BACK)
220 
221 // Whether to draw the flow on the image:
222 // False by default, since it changes the image and costs time.
223 #ifndef OPTICFLOW_SHOW_FLOW
224 #define OPTICFLOW_SHOW_FLOW FALSE
225 #endif
226 PRINT_CONFIG_VAR(OPTICFLOW_SHOW_FLOW)
227 
228 
229 
230 //Include median filter
231 #include "filters/median_filter.h"
234 
235 /* Functions only used here */
236 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime);
237 static int cmp_flow(const void *a, const void *b);
238 static int cmp_array(const void *a, const void *b);
239 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow,
240  struct opticflow_result_t *result);
241 
242 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
243  float theta_diff, float psi_diff, struct opticflow_t *opticflow);
248 void opticflow_calc_init(struct opticflow_t *opticflow)
249 {
250  /* Set the default values */
251  opticflow->method = OPTICFLOW_METHOD; //0 = LK_fast9, 1 = Edgeflow
252  opticflow->window_size = OPTICFLOW_WINDOW_SIZE;
254  opticflow->derotation = OPTICFLOW_DEROTATION; //0 = OFF, 1 = ON
257  opticflow->track_back = OPTICFLOW_TRACK_BACK;
258  opticflow->show_flow = OPTICFLOW_SHOW_FLOW;
261  if (opticflow->subpixel_factor == 0) {
262  opticflow->subpixel_factor = 10;
263  }
272 
277  opticflow->fast9_rsize = FAST9_MAX_CORNERS;
278  opticflow->fast9_ret_corners = calloc(opticflow->fast9_rsize, sizeof(struct point_t));
279 
285 
288 
289 }
298 bool calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct image_t *img,
299  struct opticflow_result_t *result)
300 {
301  if (opticflow->just_switched_method) {
302  // Create the image buffers
303  image_create(&opticflow->img_gray, img->w, img->h, IMAGE_GRAYSCALE);
304  image_create(&opticflow->prev_img_gray, img->w, img->h, IMAGE_GRAYSCALE);
305 
306  // Set the previous values
307  opticflow->got_first_img = false;
308 
309  // Init median filters with zeros
310  InitMedianFilterVect3Float(vel_filt, MEDIAN_DEFAULT_SIZE);
311  }
312 
313  // Convert image to grayscale
314  image_to_grayscale(img, &opticflow->img_gray);
315 
316  if (!opticflow->got_first_img) {
317  image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
318  opticflow->got_first_img = true;
319  return false;
320  }
321 
322  // variables for linear flow fit:
323  float error_threshold;
324  int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
325  struct linear_flow_fit_info fit_info;
326 
327  // Update FPS for information
328  float dt = timeval_diff(&(opticflow->prev_img_gray.ts), &(img->ts));
329  if (dt > 1e-5) {
330  result->fps = 1000.f / dt;
331  } else {
332  return false;
333  }
334 
335  // *************************************************************************************
336  // Corner detection
337  // *************************************************************************************
338 
339  // if feature_management is selected and tracked corners drop below a threshold, redetect
340  if ((opticflow->feature_management) && (result->corner_cnt < opticflow->max_track_corners / 2)) {
341  manage_flow_features(img, opticflow, result);
342  } else if (!opticflow->feature_management) {
343  // needs to be set to 0 because result is now static
344  result->corner_cnt = 0;
345 
346  if (opticflow->corner_method == EXHAUSTIVE_FAST) {
347  // FAST corner detection
348  // TODO: There is something wrong with fast9_detect destabilizing FPS. This problem is reduced by setting min_distance
349  // to 0 (see defines); however, a more permanent solution should be considered
350  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
351  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
352  &opticflow->fast9_rsize,
353  &opticflow->fast9_ret_corners,
354  NULL);
355 
356  } else if (opticflow->corner_method == ACT_FAST) {
357  // ACT-FAST corner detection:
358  act_fast(&opticflow->prev_img_gray, opticflow->fast9_threshold, &result->corner_cnt,
359  &opticflow->fast9_ret_corners, n_agents, n_time_steps,
360  opticflow->actfast_long_step, opticflow->actfast_short_step, opticflow->actfast_min_gradient,
361  opticflow->actfast_gradient_method);
362  }
363 
364  // Adaptive threshold
365  if (opticflow->fast9_adaptive) {
366 
367  // This works well for exhaustive FAST, but drives the threshold to the minimum for ACT-FAST:
368  // Decrease and increase the threshold based on previous values
369  if (result->corner_cnt < 40) { // TODO: Replace 40 with OPTICFLOW_MAX_TRACK_CORNERS / 2
370  // make detections easier:
371  if (opticflow->fast9_threshold > FAST9_LOW_THRESHOLD) {
372  opticflow->fast9_threshold--;
373  }
374 
375  if (opticflow->corner_method == ACT_FAST && n_agents < opticflow->fast9_rsize) {
376  n_time_steps++;
377  n_agents++;
378  }
379 
380  } else if (result->corner_cnt > OPTICFLOW_MAX_TRACK_CORNERS * 2) {
381 
382  if (opticflow->fast9_threshold < FAST9_HIGH_THRESHOLD) {
383  opticflow->fast9_threshold++;
384  }
385 
386  if (opticflow->corner_method == ACT_FAST && n_time_steps > 5 && n_agents > 10) {
387  n_time_steps--;
388  n_agents--;
389  }
390  }
391  }
392  }
393 
394 #if OPTICFLOW_SHOW_CORNERS
395  image_show_points(img, opticflow->fast9_ret_corners, result->corner_cnt);
396 #endif
397 
398  // Check if we found some corners to track
399  if (result->corner_cnt < 1) {
400  // Clear the result otherwise the previous values will be returned for this frame too
401  VECT3_ASSIGN(result->vel_cam, 0, 0, 0);
402  VECT3_ASSIGN(result->vel_body, 0, 0, 0);
403  result->div_size = 0; result->divergence = 0;
404  result->noise_measurement = 5.0;
405 
406  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
407  return false;
408  }
409 
410  // *************************************************************************************
411  // Corner Tracking
412  // *************************************************************************************
413 
414  // Execute a Lucas Kanade optical flow
415  result->tracked_cnt = result->corner_cnt;
416  uint8_t keep_bad_points = 0;
417  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, opticflow->fast9_ret_corners,
418  &result->tracked_cnt,
419  opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
420  opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level, keep_bad_points);
421 
422 
423  if (opticflow->track_back) {
424  // TODO: Watch out!
425  // We track the flow back and give badly back-tracked vectors a high error,
426  // but we do not yet remove these vectors, nor use the errors in any other function than showing the flow.
427 
428  // initialize corners at the tracked positions:
429  for (int i = 0; i < result->tracked_cnt; i++) {
430  opticflow->fast9_ret_corners[i].x = (uint32_t)(vectors[i].pos.x + vectors[i].flow_x) / opticflow->subpixel_factor;
431  opticflow->fast9_ret_corners[i].y = (uint32_t)(vectors[i].pos.y + vectors[i].flow_y) / opticflow->subpixel_factor;
432  }
433 
434  // present the images in the opposite order:
435  keep_bad_points = 1;
436  uint16_t back_track_cnt = result->tracked_cnt;
437  struct flow_t *back_vectors = opticFlowLK(&opticflow->prev_img_gray, &opticflow->img_gray, opticflow->fast9_ret_corners,
438  &back_track_cnt,
439  opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
440  opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level, keep_bad_points);
441 
442  // printf("Tracked %d points back.\n", back_track_cnt);
443  int32_t back_x, back_y, diff_x, diff_y, dist_squared;
444  int32_t back_track_threshold = 200;
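  // A threshold of 200 on the squared distance means a vector is flagged with LARGE_FLOW_ERROR when
  // the back-tracked end point lands more than sqrt(200) ~= 14 subpixels (about 1.4 pixels at the
  // default subpixel factor of 10) away from the original corner position.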
445 
446  for (int i = 0; i < result->tracked_cnt; i++) {
447  if (back_vectors[i].error < LARGE_FLOW_ERROR) {
448  back_x = (int32_t)(back_vectors[i].pos.x + back_vectors[i].flow_x);
449  back_y = (int32_t)(back_vectors[i].pos.y + back_vectors[i].flow_y);
450  diff_x = back_x - vectors[i].pos.x;
451  diff_y = back_y - vectors[i].pos.y;
452  dist_squared = diff_x * diff_x + diff_y * diff_y;
453  // printf("Vector %d: x,y = %d, %d, back x, y = %d, %d, back tracking error %d\n", i, vectors[i].pos.x, vectors[i].pos.y, back_x, back_y, dist_squared);
454  if (dist_squared > back_track_threshold) {
455  vectors[i].error = LARGE_FLOW_ERROR;
456  }
457  } else {
458  vectors[i].error = LARGE_FLOW_ERROR;
459  }
460  }
461 
462  free(back_vectors);
463  }
464 
465  if (opticflow->show_flow) {
466  uint8_t color[4] = {0, 0, 0, 0};
467  uint8_t bad_color[4] = {0, 0, 0, 0};
468  image_show_flow_color(img, vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
469  }
470 
471  static int n_samples = 100;
472  // Estimate size divergence:
473  if (SIZE_DIV) {
474  result->div_size = get_size_divergence(vectors, result->tracked_cnt, n_samples);// * result->fps;
475  } else {
476  result->div_size = 0.0f;
477  }
478 
479  if (LINEAR_FIT) {
480  // Linear flow fit (normally derotation should be performed before):
481  error_threshold = 10.0f;
482  n_iterations_RANSAC = 20;
483  n_samples_RANSAC = 5;
484  success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
485  n_samples_RANSAC, img->w, img->h, &fit_info);
486 
487  if (!success_fit) {
488  fit_info.divergence = 0.0f;
489  fit_info.surface_roughness = 0.0f;
490  }
491 
492  result->divergence = fit_info.divergence;
493  result->surface_roughness = fit_info.surface_roughness;
494  } else {
495  result->divergence = 0.0f;
496  result->surface_roughness = 0.0f;
497  }
498 
499  // Get the median flow
500  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
501  if (result->tracked_cnt == 0) {
502  // We got no flow
503  result->flow_x = 0;
504  result->flow_y = 0;
505 
506  free(vectors);
507  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
508  return false;
509  } else if (result->tracked_cnt % 2) {
510  // Take the median point
511  result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
512  result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
513  } else {
514  // Take the average of the 2 median points
515  result->flow_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
516  result->flow_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
517  }
518 
519  // TODO scale flow to rad/s here
520 
521  // ***************
522  // Flow Derotation
523  // ***************
524 
525  float diff_flow_x = 0.f;
526  float diff_flow_y = 0.f;
527 
528  if (opticflow->derotation && result->tracked_cnt > 5) {
529 
530  float rotation_threshold = M_PI / 180.0f;
531  if (fabs(opticflow->img_gray.eulers.phi - opticflow->prev_img_gray.eulers.phi) > rotation_threshold
532  || fabs(opticflow->img_gray.eulers.theta - opticflow->prev_img_gray.eulers.theta) > rotation_threshold) {
533 
534  // do not apply the derotation if the rotation rates are too high:
535  result->flow_der_x = 0.0f;
536  result->flow_der_y = 0.0f;
537 
538  } else {
539 
540  // determine the roll, pitch and yaw differences between the images.
541  float phi_diff = opticflow->img_gray.eulers.phi - opticflow->prev_img_gray.eulers.phi;
542  float theta_diff = opticflow->img_gray.eulers.theta - opticflow->prev_img_gray.eulers.theta;
543  float psi_diff = opticflow->img_gray.eulers.psi - opticflow->prev_img_gray.eulers.psi;
544 
545  if (strcmp(OPTICFLOW_CAMERA.dev_name, bottom_camera.dev_name) == 0) {
546  // bottom cam: just subtract a scaled version of the roll and pitch difference from the global flow vector:
547  diff_flow_x = phi_diff * OPTICFLOW_CAMERA.camera_intrinsics.focal_x; // phi_diff works better than (cam_state->rates.p)
548  diff_flow_y = theta_diff * OPTICFLOW_CAMERA.camera_intrinsics.focal_y;
549  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor *
550  opticflow->derotation_correction_factor_x;
551  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor *
552  opticflow->derotation_correction_factor_y;
553  } else {
554  // frontal cam, predict individual flow vectors:
555  struct flow_t *predicted_flow_vectors = predict_flow_vectors(vectors, result->tracked_cnt, phi_diff, theta_diff,
556  psi_diff, opticflow);
557  if (opticflow->show_flow) {
558  uint8_t color[4] = {255, 255, 255, 255};
559  uint8_t bad_color[4] = {255, 255, 255, 255};
560  image_show_flow_color(img, predicted_flow_vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
561  }
562 
563  for (int i = 0; i < result->tracked_cnt; i++) {
564  // subtract the flow:
565  vectors[i].flow_x -= predicted_flow_vectors[i].flow_x;
566  vectors[i].flow_y -= predicted_flow_vectors[i].flow_y;
567  }
568 
569  // vectors have to be re-sorted after derotation:
570  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
571 
572  if (result->tracked_cnt % 2) {
573  // Take the median point
574  result->flow_der_x = vectors[result->tracked_cnt / 2].flow_x;
575  result->flow_der_y = vectors[result->tracked_cnt / 2].flow_y;
576  } else {
577  // Take the average of the 2 median points
578  result->flow_der_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
579  result->flow_der_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
580  }
581  }
582  }
583  }
584 
585  // Velocity calculation
586  // Right now this formula is under the assumption that the flow only exists along the center axis of the camera.
587  // TODO: Calculate the velocity in a more sophisticated way, taking into account the drone's attitude and the slope of the ground plane.
588  // TODO: This is actually only correct for the bottom camera:
589  result->vel_cam.x = (float)result->flow_der_x * result->fps * agl_dist_value_filtered /
590  (opticflow->subpixel_factor * OPTICFLOW_CAMERA.camera_intrinsics.focal_x);
591  result->vel_cam.y = (float)result->flow_der_y * result->fps * agl_dist_value_filtered /
592  (opticflow->subpixel_factor * OPTICFLOW_CAMERA.camera_intrinsics.focal_y);
593  result->vel_cam.z = result->divergence * result->fps * agl_dist_value_filtered;
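  // Worked example with illustrative numbers (not from this codebase): with flow_der_x = 50 subpixels,
  // subpixel_factor = 10, fps = 30, agl_dist_value_filtered = 1.0 m and focal_x = 300 px,
  // vel_cam.x = 50 * 30 * 1.0 / (10 * 300) = 0.5 m/s.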
594 
595  // Apply a median filter to the velocity if wanted
596  if (opticflow->median_filter == true) {
597  UpdateMedianFilterVect3Float(vel_filt, result->vel_cam);
598  }
599 
600  // Determine quality of noise measurement for state filter
601  //TODO develop a noise model based on groundtruth
602  //result->noise_measurement = 1 - (float)result->tracked_cnt / ((float)opticflow->max_track_corners * 1.25f);
603  result->noise_measurement = 0.25;
604 
605  // *************************************************************************************
606  // Next Loop Preparation
607  // *************************************************************************************
608  if (opticflow->feature_management) {
609  result->corner_cnt = result->tracked_cnt;
610  //get the new positions of the corners and the "residual" subpixel positions
611  for (uint16_t i = 0; i < result->tracked_cnt; i++) {
612  opticflow->fast9_ret_corners[i].x = (uint32_t)((vectors[i].pos.x + (float)vectors[i].flow_x) /
613  opticflow->subpixel_factor);
614  opticflow->fast9_ret_corners[i].y = (uint32_t)((vectors[i].pos.y + (float)vectors[i].flow_y) /
615  opticflow->subpixel_factor);
616  opticflow->fast9_ret_corners[i].x_sub = (uint16_t)((vectors[i].pos.x + vectors[i].flow_x) % opticflow->subpixel_factor);
617  opticflow->fast9_ret_corners[i].y_sub = (uint16_t)((vectors[i].pos.y + vectors[i].flow_y) % opticflow->subpixel_factor);
618  opticflow->fast9_ret_corners[i].count = vectors[i].pos.count;
619  }
620  }
621  free(vectors);
622  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
623 
624  return true;
625 }
626 
627 /*
628  * Predict flow vectors by means of the rotation rates:
629  */
630 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
631  float theta_diff, float psi_diff, struct opticflow_t *opticflow)
632 {
633 
634  // reserve memory for the predicted flow vectors:
635  struct flow_t *predicted_flow_vectors = malloc(sizeof(struct flow_t) * n_points);
636 
637  float K[9] = {OPTICFLOW_CAMERA.camera_intrinsics.focal_x, 0.0f, OPTICFLOW_CAMERA.camera_intrinsics.center_x,
638  0.0f, OPTICFLOW_CAMERA.camera_intrinsics.focal_y, OPTICFLOW_CAMERA.camera_intrinsics.center_y,
639  0.0f, 0.0f, 1.0f
640  };
641  // TODO: make an option to not do distortion / undistortion (Dhane_k = 1)
642  float k = OPTICFLOW_CAMERA.camera_intrinsics.Dhane_k;
643 
644  float A, B, C; // as in Longuet-Higgins
645 
646  if (strcmp(OPTICFLOW_CAMERA.dev_name, front_camera.dev_name) == 0) {
647  // specific for the x,y swapped Bebop 2 images:
648  A = -psi_diff;
649  B = theta_diff;
650  C = phi_diff;
651  } else {
652  A = theta_diff;
653  B = phi_diff;
654  C = psi_diff;
655  }
656 
657  float x_n, y_n;
658  float x_n_new, y_n_new, x_pix_new, y_pix_new;
659  float predicted_flow_x, predicted_flow_y;
660  for (uint16_t i = 0; i < n_points; i++) {
661  // the from-coordinate is always the same:
662  predicted_flow_vectors[i].pos.x = flow_vectors[i].pos.x;
663  predicted_flow_vectors[i].pos.y = flow_vectors[i].pos.y;
664 
665  bool success = distorted_pixels_to_normalized_coords((float)flow_vectors[i].pos.x / opticflow->subpixel_factor,
666  (float)flow_vectors[i].pos.y / opticflow->subpixel_factor, &x_n, &y_n, k, K);
667  if (success) {
668  // predict flow as in a linear pinhole camera model:
669  predicted_flow_x = A * x_n * y_n - B * x_n * x_n - B + C * y_n;
670  predicted_flow_y = -C * x_n + A + A * y_n * y_n - B * x_n * y_n;
671 
672  x_n_new = x_n + predicted_flow_x;
673  y_n_new = y_n + predicted_flow_y;
674 
675  success = normalized_coords_to_distorted_pixels(x_n_new, y_n_new, &x_pix_new, &y_pix_new, k, K);
676 
677  if (success) {
678  predicted_flow_vectors[i].flow_x = (int16_t)(x_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.x);
679  predicted_flow_vectors[i].flow_y = (int16_t)(y_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.y);
680  predicted_flow_vectors[i].error = 0;
681  } else {
682  predicted_flow_vectors[i].flow_x = 0;
683  predicted_flow_vectors[i].flow_y = 0;
684  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
685  }
686  } else {
687  predicted_flow_vectors[i].flow_x = 0;
688  predicted_flow_vectors[i].flow_y = 0;
689  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
690  }
691  }
692  return predicted_flow_vectors;
693 }
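For reference, the two flow predictions above are the rotation-only part of the classic Longuet-Higgins flow-field equations (the code comment "as in Longuet-Higgins" refers to these). In normalized image coordinates (x_n, y_n), with small inter-frame rotation angles A, B, C about the camera axes, the induced image motion is approximately

\[
\Delta x_n = A\,x_n y_n - B\,(x_n^2 + 1) + C\,y_n, \qquad
\Delta y_n = A\,(y_n^2 + 1) - B\,x_n y_n - C\,x_n .
\]

How the Euler-angle differences (phi, theta, psi) map onto A, B, C depends on the camera mounting, which is why the code distinguishes the front camera (with x/y swapped on the Bebop 2) from the bottom camera.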
694 
695 
696 /* manage_flow_features - Update list of corners to be tracked by LK
697  * Remembers previous points and tries to find new points in less dense
698  * areas of the image first.
699  *
700  */
701 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow, struct opticflow_result_t *result)
702 {
703  // first check if corners have not moved too close together due to flow:
704  int16_t c1 = 0;
705  while (c1 < (int16_t)result->corner_cnt - 1) {
706  bool exists = false;
707  for (int16_t i = c1 + 1; i < result->corner_cnt; i++) {
708  if (abs((int16_t)opticflow->fast9_ret_corners[c1].x - (int16_t)opticflow->fast9_ret_corners[i].x) <
709  opticflow->fast9_min_distance / 2
710  && abs((int16_t)opticflow->fast9_ret_corners[c1].y - (int16_t)opticflow->fast9_ret_corners[i].y) <
711  opticflow->fast9_min_distance / 2) {
712  // if too close, replace the corner with the last one in the list:
713  opticflow->fast9_ret_corners[c1].x = opticflow->fast9_ret_corners[result->corner_cnt - 1].x;
714  opticflow->fast9_ret_corners[c1].y = opticflow->fast9_ret_corners[result->corner_cnt - 1].y;
715  opticflow->fast9_ret_corners[c1].count = opticflow->fast9_ret_corners[result->corner_cnt - 1].count;
716  opticflow->fast9_ret_corners[c1].x_sub = opticflow->fast9_ret_corners[result->corner_cnt - 1].x_sub;
717  opticflow->fast9_ret_corners[c1].y_sub = opticflow->fast9_ret_corners[result->corner_cnt - 1].y_sub;
718 
719  // decrease the number of corners:
720  result->corner_cnt--;
721  exists = true;
722  // no further checking required for the removed corner
723  break;
724  }
725  }
726  // if the corner has been replaced, the new corner in position c1 has to be checked again:
727  if (!exists) { c1++; }
728  }
729 
730  // no need for "per region" re-detection when there are no previous corners
731  if ((!opticflow->fast9_region_detect) || (result->corner_cnt == 0)) {
732  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
733  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
734  &opticflow->fast9_rsize,
735  &opticflow->fast9_ret_corners,
736  NULL);
737  } else {
738  // allocating memory and initializing the 2d array that holds the number of corners per region and its index (for the sorting)
739  uint16_t **region_count = calloc(opticflow->fast9_num_regions, sizeof(uint16_t *));
740  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
741  region_count[i] = calloc(2, sizeof(uint16_t));
742  region_count[i][0] = 0;
743  region_count[i][1] = i;
744  }
745  uint16_t root_regions = (uint16_t)sqrtf((float)opticflow->fast9_num_regions);
746  int region_index;
747  for (uint16_t i = 0; i < result->corner_cnt; i++) {
748  region_index = (opticflow->fast9_ret_corners[i].x * root_regions / img->w
749  + root_regions * (opticflow->fast9_ret_corners[i].y * root_regions / img->h));
750  region_index = (region_index < opticflow->fast9_num_regions) ? region_index : opticflow->fast9_num_regions - 1;
751  region_count[region_index][0]++;
752  }
753 
754  //sorting region_count array according to first column (number of corners).
755  qsort(region_count, opticflow->fast9_num_regions, sizeof(region_count[0]), cmp_array);
756 
757  uint16_t roi[4];
758  // Detect corners starting from the region with the fewest corners to the one with the most, until a desired total is reached.
759  for (uint16_t i = 0; i < opticflow->fast9_num_regions && result->corner_cnt < 2 * opticflow->max_track_corners; i++) {
760  // Find the boundaries of the region of interest
761  roi[0] = (region_count[i][1] % root_regions) * (img->w / root_regions);
762  roi[1] = (region_count[i][1] / root_regions) * (img->h / root_regions);
763  roi[2] = roi[0] + (img->w / root_regions);
764  roi[3] = roi[1] + (img->h / root_regions);
765 
766  struct point_t *new_corners = calloc(opticflow->fast9_rsize, sizeof(struct point_t));
767  uint16_t new_count = 0;
768 
769  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
770  opticflow->fast9_padding, opticflow->fast9_padding, &new_count,
771  &opticflow->fast9_rsize, &new_corners, roi);
772 
773  // check that none of the newly detected points already exist in the list
774  for (uint16_t j = 0; j < new_count; j++) {
775  bool exists = false;
776  for (uint16_t k = 0; k < result->corner_cnt; k++) {
777  if (abs((int16_t)new_corners[j].x - (int16_t)opticflow->fast9_ret_corners[k].x) < (int16_t)opticflow->fast9_min_distance
778  && abs((int16_t)new_corners[j].y - (int16_t)opticflow->fast9_ret_corners[k].y) < (int16_t)
779  opticflow->fast9_min_distance) {
780  exists = true;
781  break;
782  }
783  }
784  if (!exists) {
785  opticflow->fast9_ret_corners[result->corner_cnt].x = new_corners[j].x;
786  opticflow->fast9_ret_corners[result->corner_cnt].y = new_corners[j].y;
787  opticflow->fast9_ret_corners[result->corner_cnt].count = 0;
788  opticflow->fast9_ret_corners[result->corner_cnt].x_sub = 0;
789  opticflow->fast9_ret_corners[result->corner_cnt].y_sub = 0;
790  result->corner_cnt++;
791 
792  if (result->corner_cnt >= opticflow->fast9_rsize) {
793  break;
794  }
795  }
796  }
797 
798  free(new_corners);
799  }
800  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
801  free(region_count[i]);
802  }
803  free(region_count);
804  }
805 }
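As a concrete illustration of the region bookkeeping above (the numbers are illustrative only): with fast9_num_regions = 9, root_regions = 3, so on a 240 x 240 image a corner at (200, 50) gets region_index = 200 * 3 / 240 + 3 * (50 * 3 / 240) = 2 + 3 * 0 = 2 (integer division), and when that region is re-detected its ROI spans x in [160, 240) and y in [0, 80).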
806 
815 bool calc_edgeflow_tot(struct opticflow_t *opticflow, struct image_t *img,
816  struct opticflow_result_t *result)
817 {
818  // Define Static Variables
819  static struct edge_hist_t edge_hist[MAX_HORIZON];
820  static uint8_t current_frame_nr = 0;
821  struct edge_flow_t edgeflow;
822  static uint8_t previous_frame_offset[2] = {1, 1};
823 
824  // Define Normal variables
825  struct edgeflow_displacement_t displacement;
826  displacement.x = calloc(img->w, sizeof(int32_t));
827  displacement.y = calloc(img->h, sizeof(int32_t));
828 
829  // If the method just switched to this one, reinitialize the
830  // array of edge_hist structures.
831  if (opticflow->just_switched_method == 1 && edge_hist[0].x == NULL) {
832  int i;
833  for (i = 0; i < MAX_HORIZON; i++) {
834  edge_hist[i].x = calloc(img->w, sizeof(int32_t));
835  edge_hist[i].y = calloc(img->h, sizeof(int32_t));
836  FLOAT_EULERS_ZERO(edge_hist[i].eulers);
837  }
838  }
839 
840  uint16_t disp_range;
841  if (opticflow->search_distance < DISP_RANGE_MAX) {
842  disp_range = opticflow->search_distance;
843  } else {
844  disp_range = DISP_RANGE_MAX;
845  }
846 
847  uint16_t window_size;
848 
849  if (opticflow->window_size < MAX_WINDOW_SIZE) {
850  window_size = opticflow->window_size;
851  } else {
852  window_size = MAX_WINDOW_SIZE;
853  }
854 
855  uint16_t RES = opticflow->resolution_factor;
856 
857  //......................Calculating EdgeFlow..................... //
858 
859  // Calculate current frame's edge histogram
860  int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
861  int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
862  calculate_edge_histogram(img, edge_hist_x, 'x', 0);
863  calculate_edge_histogram(img, edge_hist_y, 'y', 0);
864 
865 
866  // Copy frame time and angles of image to calculated edge histogram
867  edge_hist[current_frame_nr].frame_time = img->ts;
868  edge_hist[current_frame_nr].eulers = img->eulers;
869 
870  // Calculate which previous edge_hist to compare with the current
871  uint8_t previous_frame_nr[2];
872  calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);
873 
874  //Select edge histogram from the previous frame nr
875  int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
876  int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;
877 
878  //Calculate the corresponding derotation of the two frames
879  int16_t der_shift_x = 0;
880  int16_t der_shift_y = 0;
881 
882  if (opticflow->derotation) {
883  der_shift_x = (int16_t)((edge_hist[current_frame_nr].eulers.phi - edge_hist[previous_frame_nr[0]].eulers.phi) *
884  OPTICFLOW_CAMERA.camera_intrinsics.focal_x * opticflow->derotation_correction_factor_x);
885  der_shift_y = (int16_t)((edge_hist[current_frame_nr].eulers.theta - edge_hist[previous_frame_nr[1]].eulers.theta) *
886  OPTICFLOW_CAMERA.camera_intrinsics.focal_y * opticflow->derotation_correction_factor_y);
887  }
888 
889  // Estimate pixel wise displacement of the edge histograms for x and y direction
890  calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
891  displacement.x, img->w,
892  window_size, disp_range, der_shift_x);
893  calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
894  displacement.y, img->h,
895  window_size, disp_range, der_shift_y);
896 
897  // Fit a line on the pixel displacement to estimate
898  // the global pixel flow and divergence (RES is resolution)
899  line_fit(displacement.x, &edgeflow.div_x,
900  &edgeflow.flow_x, img->w,
901  window_size + disp_range, RES);
902  line_fit(displacement.y, &edgeflow.div_y,
903  &edgeflow.flow_y, img->h,
904  window_size + disp_range, RES);
905 
906  /* Save the resulting flow in results
907  * Warning: The flow detected here is different in sign
908  * and size; therefore it is divided by
909  * the same subpixel factor and multiplied by -1 to make it
910  * consistent with the LK algorithm in opticflow_calculator.c
911  */
912  edgeflow.flow_x = -1 * edgeflow.flow_x;
913  edgeflow.flow_y = -1 * edgeflow.flow_y;
914 
915  edgeflow.flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
916  edgeflow.flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];
917 
918  result->flow_x = (int16_t)edgeflow.flow_x / RES;
919  result->flow_y = (int16_t)edgeflow.flow_y / RES;
920 
921  // Fill up the result's optic flow fields to be on par with LK_fast9
922  result->flow_der_x = result->flow_x;
923  result->flow_der_y = result->flow_y;
924  result->corner_cnt = getAmountPeaks(edge_hist_x, 500, img->w);
925  result->tracked_cnt = getAmountPeaks(edge_hist_x, 500, img->w);
926  result->divergence = -1.0 * (float)edgeflow.div_x /
927  RES; // Also multiply the divergence by -1.0 to make it consistent with the LK algorithm
928  result->div_size =
929  result->divergence; // Fill div_size with the divergence to at least get some divergence measurement when switching from LK to EF
930  result->surface_roughness = 0.0f;
931 
932  //......................Calculating VELOCITY ..................... //
933 
934  /* Estimate fps per direction
935  * This is the fps with the adaptive horizon for subpixel flow, which is not the same
936  * as the loop rate of the algorithm. The faster the quadcopter flies,
937  * the higher it becomes.
938  */
939  float fps_x = 0;
940  float fps_y = 0;
941  float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
942  float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
943  fps_x = 1 / (time_diff_x);
944  fps_y = 1 / (time_diff_y);
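  // Illustrative numbers (not from this codebase): if previous_frame_offset[0] = 4 and frames
  // arrive every 33 ms, then time_diff_x ~= 0.132 s and fps_x ~= 7.6, even though the vision
  // loop itself may still run at ~30 Hz.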
945 
946  result->fps = fps_x;
947 
948  // TODO scale flow to rad/s here
949 
950  // Calculate velocity
951  result->vel_cam.x = edgeflow.flow_x * fps_x * agl_dist_value_filtered * OPTICFLOW_CAMERA.camera_intrinsics.focal_x /
952  RES;
953  result->vel_cam.y = edgeflow.flow_y * fps_y * agl_dist_value_filtered * OPTICFLOW_CAMERA.camera_intrinsics.focal_y /
954  RES;
955  result->vel_cam.z = result->divergence * fps_x * agl_dist_value_filtered;
956 
957  // Apply a median filter to the velocity if wanted
958  if (opticflow->median_filter == true) {
959  UpdateMedianFilterVect3Float(vel_filt, result->vel_cam);
960  }
961 
962  result->noise_measurement = 0.2;
963 
964 #if OPTICFLOW_SHOW_FLOW
965  draw_edgeflow_img(img, edgeflow, prev_edge_histogram_x, edge_hist_x);
966 #endif
967  // Increment and wrap the current frame number
968  current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
969 
970  // Free alloc'd variables
971  free(displacement.x);
972  free(displacement.y);
973 
974  return true;
975 }
976 
977 
985 bool opticflow_calc_frame(struct opticflow_t *opticflow, struct image_t *img,
986  struct opticflow_result_t *result)
987 {
988  bool flow_successful = false;
989  // A switch counter that checks in the loop whether the current method is the same
990  // as the previous one (for reinitializing structs)
991  static int8_t switch_counter = -1;
992  if (switch_counter != opticflow->method) {
993  opticflow->just_switched_method = true;
994  switch_counter = opticflow->method;
995  // Clear the static result
996  memset(result, 0, sizeof(struct opticflow_result_t));
997  } else {
998  opticflow->just_switched_method = false;
999  }
1000 
1001  // Switch between methods (0 = fast9/lukas-kanade, 1 = EdgeFlow)
1002  if (opticflow->method == 0) {
1003  flow_successful = calc_fast9_lukas_kanade(opticflow, img, result);
1004  } else if (opticflow->method == 1) {
1005  flow_successful = calc_edgeflow_tot(opticflow, img, result);
1006  }
1007 
1008  /* Rotate velocities from camera frame coordinates to body coordinates for control.
1009  * IMPORTANT!!! This camera-to-body orientation holds for the Parrot
1010  * AR.Drone and Bebop; however, it can be different for other quadcopters.
1011  * ALWAYS double check!
1012  */
1013  float_rmat_transp_vmult(&result->vel_body, &body_to_cam, &result->vel_cam);
1014 
1015  return flow_successful;
1016 }
1017 
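For orientation, a minimal sketch of how a caller could drive this calculator; it assumes a Paparazzi build where OPTICFLOW_CAMERA and the camera callback are already configured, and the hook names (my_vision_init, my_vision_frame) are purely illustrative (the actual wiring is done by the surrounding optical-flow module):

    #include "opticflow_calculator.h"

    // Illustrative only: one calculator instance plus a result struct, fed one frame at a time.
    static struct opticflow_t flow_calc;
    static struct opticflow_result_t flow_result;

    void my_vision_init(void)                  // hypothetical init hook
    {
      opticflow_calc_init(&flow_calc);         // fills in the OPTICFLOW_* defaults defined above
    }

    void my_vision_frame(struct image_t *img)  // hypothetical per-frame callback
    {
      // Returns true when a usable flow / velocity estimate was produced for this frame.
      if (opticflow_calc_frame(&flow_calc, img, &flow_result)) {
        // flow_result.vel_body now holds the body-frame velocity estimate (m/s).
      }
    }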
1024 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
1025 {
1026  uint32_t msec;
1027  msec = (finishtime->tv_sec - starttime->tv_sec) * 1000;
1028  msec += (finishtime->tv_usec - starttime->tv_usec) / 1000;
1029  return msec;
1030 }
1031 
1039 static int cmp_flow(const void *a, const void *b)
1040 {
1041  const struct flow_t *a_p = (const struct flow_t *)a;
1042  const struct flow_t *b_p = (const struct flow_t *)b;
1043  return (a_p->flow_x * a_p->flow_x + a_p->flow_y * a_p->flow_y) - (b_p->flow_x * b_p->flow_x + b_p->flow_y *
1044  b_p->flow_y);
1045 }
1046 
1054 static int cmp_array(const void *a, const void *b)
1055 {
1056  const uint16_t *pa = *(const uint16_t *const *)a;  // qsort passes pointers to the row pointers, so dereference once
1057  const uint16_t *pb = *(const uint16_t *const *)b;
1058  return pa[0] - pb[0];
1059 }