Paparazzi UAS  v7.0_unstable
Paparazzi is a free software Unmanned Aircraft System.
opticflow_calculator.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2014 Hann Woei Ho
3  * 2015 Freek van Tienen <freek.v.tienen@gmail.com>
4  * 2016 Kimberly McGuire <k.n.mcguire@tudelft.nl>
5  *
6  * This file is part of Paparazzi.
7  *
8  * Paparazzi is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * Paparazzi is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with Paparazzi; see the file COPYING. If not, see
20  * <http://www.gnu.org/licenses/>.
21  */
22 
29 #include "std.h"
30 
31 #include <stdio.h>
32 #include <string.h>
33 #include <stdlib.h>
34 
35 // Own Header
36 #include "opticflow_calculator.h"
37 
38 // Computer Vision
39 #include "lib/vision/image.h"
41 #include "lib/vision/fast_rosten.h"
42 #include "lib/vision/act_fast.h"
43 #include "lib/vision/edge_flow.h"
45 #include "size_divergence.h"
46 #include "linear_flow_fit.h"
47 #include "modules/sonar/agl_dist.h"
48 #include "generated/airframe.h"
49 
50 // to get the definition of front_camera / bottom_camera
51 #include BOARD_CONFIG
52 
53 // whether to show the flow and corners:
54 #define OPTICFLOW_SHOW_CORNERS 0
55 
56 #define EXHAUSTIVE_FAST 0
57 #define ACT_FAST 1
58 // TODO: these are now adapted, but perhaps later could be a setting:
59 uint16_t n_time_steps[2] = {10, 10};
60 uint16_t n_agents[2] = {25, 25};
61 
62 // What methods are run to determine divergence, lateral flow, etc.
63 // SIZE_DIV looks at line sizes and only calculates divergence
64 #define SIZE_DIV 1
65 // LINEAR_FIT makes a linear optical flow field fit and extracts a lot of information:
66 // relative velocities in x, y, z (divergence / time to contact), the slope of the surface, and the surface roughness.
67 #define LINEAR_FIT 0
68 
69 #ifndef OPTICFLOW_CORNER_METHOD
70 #define OPTICFLOW_CORNER_METHOD ACT_FAST
71 #endif
72 
73 #ifndef OPTICFLOW_CORNER_METHOD_CAMERA2
74 #define OPTICFLOW_CORNER_METHOD_CAMERA2 ACT_FAST
75 #endif
78 
79 /* Set the default values */
80 #ifndef OPTICFLOW_MAX_TRACK_CORNERS
81 #define OPTICFLOW_MAX_TRACK_CORNERS 25
82 #endif
83 
84 #ifndef OPTICFLOW_MAX_TRACK_CORNERS_CAMERA2
85 #define OPTICFLOW_MAX_TRACK_CORNERS_CAMERA2 25
86 #endif
89 
90 #ifndef OPTICFLOW_WINDOW_SIZE
91 #define OPTICFLOW_WINDOW_SIZE 10
92 #endif
93 
94 #ifndef OPTICFLOW_WINDOW_SIZE_CAMERA2
95 #define OPTICFLOW_WINDOW_SIZE_CAMERA2 10
96 #endif
99 
100 #ifndef OPTICFLOW_SEARCH_DISTANCE
101 #define OPTICFLOW_SEARCH_DISTANCE 20
102 #endif
103 
104 #ifndef OPTICFLOW_SEARCH_DISTANCE_CAMERA2
105 #define OPTICFLOW_SEARCH_DISTANCE_CAMERA2 20
106 #endif
109 
110 #ifndef OPTICFLOW_SUBPIXEL_FACTOR
111 #define OPTICFLOW_SUBPIXEL_FACTOR 10
112 #endif
113 
114 #ifndef OPTICFLOW_SUBPIXEL_FACTOR_CAMERA2
115 #define OPTICFLOW_SUBPIXEL_FACTOR_CAMERA2 10
116 #endif
119 
120 #ifndef OPTICFLOW_RESOLUTION_FACTOR
121 #define OPTICFLOW_RESOLUTION_FACTOR 100
122 #endif
123 
124 #ifndef OPTICFLOW_RESOLUTION_FACTOR_CAMERA2
125 #define OPTICFLOW_RESOLUTION_FACTOR_CAMERA2 100
126 #endif
129 
130 #ifndef OPTICFLOW_MAX_ITERATIONS
131 #define OPTICFLOW_MAX_ITERATIONS 10
132 #endif
133 
134 #ifndef OPTICFLOW_MAX_ITERATIONS_CAMERA2
135 #define OPTICFLOW_MAX_ITERATIONS_CAMERA2 10
136 #endif
139 
140 #ifndef OPTICFLOW_THRESHOLD_VEC
141 #define OPTICFLOW_THRESHOLD_VEC 2
142 #endif
143 
144 #ifndef OPTICFLOW_THRESHOLD_VEC_CAMERA2
145 #define OPTICFLOW_THRESHOLD_VEC_CAMERA2 2
146 #endif
149 
150 #ifndef OPTICFLOW_PYRAMID_LEVEL
151 #define OPTICFLOW_PYRAMID_LEVEL 2
152 #endif
153 
154 #ifndef OPTICFLOW_PYRAMID_LEVEL_CAMERA2
155 #define OPTICFLOW_PYRAMID_LEVEL_CAMERA2 2
156 #endif
159 
160 #ifndef OPTICFLOW_FAST9_ADAPTIVE
161 #define OPTICFLOW_FAST9_ADAPTIVE TRUE
162 #endif
163 
164 #ifndef OPTICFLOW_FAST9_ADAPTIVE_CAMERA2
165 #define OPTICFLOW_FAST9_ADAPTIVE_CAMERA2 TRUE
166 #endif
169 
170 #ifndef OPTICFLOW_FAST9_THRESHOLD
171 #define OPTICFLOW_FAST9_THRESHOLD 20
172 #endif
173 
174 #ifndef OPTICFLOW_FAST9_THRESHOLD_CAMERA2
175 #define OPTICFLOW_FAST9_THRESHOLD_CAMERA2 20
176 #endif
179 
180 #ifndef OPTICFLOW_FAST9_MIN_DISTANCE
181 #define OPTICFLOW_FAST9_MIN_DISTANCE 10
182 #endif
183 
184 #ifndef OPTICFLOW_FAST9_MIN_DISTANCE_CAMERA2
185 #define OPTICFLOW_FAST9_MIN_DISTANCE_CAMERA2 10
186 #endif
189 
190 #ifndef OPTICFLOW_FAST9_PADDING
191 #define OPTICFLOW_FAST9_PADDING 20
192 #endif
193 
194 #ifndef OPTICFLOW_FAST9_PADDING_CAMERA2
195 #define OPTICFLOW_FAST9_PADDING_CAMERA2 20
196 #endif
199 
200 // thresholds FAST9 that are currently not set from the GCS:
201 #define FAST9_LOW_THRESHOLD 5
202 #define FAST9_HIGH_THRESHOLD 60
203 
204 
205 #ifndef OPTICFLOW_METHOD
206 #define OPTICFLOW_METHOD 0
207 #endif
208 
209 #ifndef OPTICFLOW_METHOD_CAMERA2
210 #define OPTICFLOW_METHOD_CAMERA2 0
211 #endif
214 
215 #if OPTICFLOW_METHOD > 1
216 #error WARNING: Both Lukas Kanade and EdgeFlow are NOT selected camera1
217 #endif
218 
219 #if OPTICFLOW_METHOD_CAMERA2 > 1
220 #error WARNING: Both Lukas Kanade and EdgeFlow are NOT selected camera2
221 #endif
222 
223 #ifndef OPTICFLOW_DEROTATION
224 #define OPTICFLOW_DEROTATION TRUE
225 #endif
226 
227 #ifndef OPTICFLOW_DEROTATION_CAMERA2
228 #define OPTICFLOW_DEROTATION_CAMERA2 TRUE
229 #endif
232 
233 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X
234 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X 1.0
235 #endif
236 
237 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X_CAMERA2
238 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X_CAMERA2 1.0
239 #endif
242 
243 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y
244 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y 1.0
245 #endif
246 
247 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y_CAMERA2
248 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y_CAMERA2 1.0
249 #endif
252 
253 #ifndef OPTICFLOW_MEDIAN_FILTER
254 #define OPTICFLOW_MEDIAN_FILTER FALSE
255 #endif
256 
257 #ifndef OPTICFLOW_MEDIAN_FILTER_CAMERA2
258 #define OPTICFLOW_MEDIAN_FILTER_CAMERA2 FALSE
259 #endif
262 
263 #ifndef OPTICFLOW_FEATURE_MANAGEMENT
264 #define OPTICFLOW_FEATURE_MANAGEMENT 0
265 #endif
266 
267 #ifndef OPTICFLOW_FEATURE_MANAGEMENT_CAMERA2
268 #define OPTICFLOW_FEATURE_MANAGEMENT_CAMERA2 0
269 #endif
272 
273 #ifndef OPTICFLOW_FAST9_REGION_DETECT
274 #define OPTICFLOW_FAST9_REGION_DETECT 1
275 #endif
276 
277 #ifndef OPTICFLOW_FAST9_REGION_DETECT_CAMERA2
278 #define OPTICFLOW_FAST9_REGION_DETECT_CAMERA2 1
279 #endif
282 
283 #ifndef OPTICFLOW_FAST9_NUM_REGIONS
284 #define OPTICFLOW_FAST9_NUM_REGIONS 9
285 #endif
286 
287 #ifndef OPTICFLOW_FAST9_NUM_REGIONS_CAMERA2
288 #define OPTICFLOW_FAST9_NUM_REGIONS_CAMERA2 9
289 #endif
292 
293 #ifndef OPTICFLOW_ACTFAST_LONG_STEP
294 #define OPTICFLOW_ACTFAST_LONG_STEP 10
295 #endif
296 
297 #ifndef OPTICFLOW_ACTFAST_LONG_STEP_CAMERA2
298 #define OPTICFLOW_ACTFAST_LONG_STEP_CAMERA2 10
299 #endif
302 
303 #ifndef OPTICFLOW_ACTFAST_SHORT_STEP
304 #define OPTICFLOW_ACTFAST_SHORT_STEP 2
305 #endif
306 
307 #ifndef OPTICFLOW_ACTFAST_SHORT_STEP_CAMERA2
308 #define OPTICFLOW_ACTFAST_SHORT_STEP_CAMERA2 2
309 #endif
312 
313 #ifndef OPTICFLOW_ACTFAST_GRADIENT_METHOD
314 #define OPTICFLOW_ACTFAST_GRADIENT_METHOD 1
315 #endif
316 
317 #ifndef OPTICFLOW_ACTFAST_GRADIENT_METHOD_CAMERA2
318 #define OPTICFLOW_ACTFAST_GRADIENT_METHOD_CAMERA2 1
319 #endif
322 
323 #ifndef OPTICFLOW_ACTFAST_MIN_GRADIENT
324 #define OPTICFLOW_ACTFAST_MIN_GRADIENT 10
325 #endif
326 
327 #ifndef OPTICFLOW_ACTFAST_MIN_GRADIENT_CAMERA2
328 #define OPTICFLOW_ACTFAST_MIN_GRADIENT_CAMERA2 10
329 #endif
332 
333 // Defaults for ARdrone
334 #ifndef OPTICFLOW_BODY_TO_CAM_PHI
335 #define OPTICFLOW_BODY_TO_CAM_PHI 0
336 #endif
337 #ifndef OPTICFLOW_BODY_TO_CAM_THETA
338 #define OPTICFLOW_BODY_TO_CAM_THETA 0
339 #endif
340 #ifndef OPTICFLOW_BODY_TO_CAM_PSI
341 #define OPTICFLOW_BODY_TO_CAM_PSI -M_PI_2
342 #endif
343 
344 #ifndef OPTICFLOW_BODY_TO_CAM_PHI_CAMERA2
345 #define OPTICFLOW_BODY_TO_CAM_PHI_CAMERA2 0
346 #endif
347 #ifndef OPTICFLOW_BODY_TO_CAM_THETA_CAMERA2
348 #define OPTICFLOW_BODY_TO_CAM_THETA_CAMERA2 0
349 #endif
350 #ifndef OPTICFLOW_BODY_TO_CAM_PSI_CAMERA2
351 #define OPTICFLOW_BODY_TO_CAM_PSI_CAMERA2 -M_PI_2
352 #endif
353 
354 // Tracking back flow to make the accepted flow vectors more robust:
355 // Default is false, as it does take extra processing time
356 #ifndef OPTICFLOW_TRACK_BACK
357 #define OPTICFLOW_TRACK_BACK FALSE
358 #endif
359 
360 #ifndef OPTICFLOW_TRACK_BACK_CAMERA2
361 #define OPTICFLOW_TRACK_BACK_CAMERA2 FALSE
362 #endif
365 
366 // Whether to draw the flow on the image:
367 // False by default, since it changes the image and costs time.
368 #ifndef OPTICFLOW_SHOW_FLOW
369 #define OPTICFLOW_SHOW_FLOW FALSE
370 #endif
371 
372 #ifndef OPTICFLOW_SHOW_FLOW_CAMERA2
373 #define OPTICFLOW_SHOW_FLOW_CAMERA2 FALSE
374 #endif
377 
378 
379 //Include median filter
380 #include "filters/median_filter.h"
382 struct FloatRMat body_to_cam[2];
383 
384 /* Functions only used here */
385 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime);
386 static int cmp_flow(const void *a, const void *b);
387 static int cmp_array(const void *a, const void *b);
388 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow,
389  struct opticflow_result_t *result);
390 
391 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
392  float theta_diff, float psi_diff, struct opticflow_t *opticflow);
398 {
399  /* Set the default values */
400  opticflow[0].method = OPTICFLOW_METHOD; //0 = LK_fast9, 1 = Edgeflow
403  opticflow[0].derotation = OPTICFLOW_DEROTATION; //0 = OFF, 1 = ON
410  if (opticflow[0].subpixel_factor == 0) {
411  opticflow[0].subpixel_factor = 10;
412  }
421 
427  opticflow[0].fast9_ret_corners = calloc(opticflow[0].fast9_rsize, sizeof(struct point_t));
428 
434 
435  opticflow[0].camera = &OPTICFLOW_CAMERA;
436  opticflow[0].id = 0;
437 
439  float_rmat_of_eulers(&body_to_cam[0], &euler_cam1);
440 
441 #ifdef OPTICFLOW_CAMERA2
442  opticflow[1].method = OPTICFLOW_METHOD_CAMERA2; //0 = LK_fast9, 1 = Edgeflow
445  opticflow[1].derotation = OPTICFLOW_DEROTATION_CAMERA2; //0 = OFF, 1 = ON
452  if (opticflow[1].subpixel_factor == 0) {
453  opticflow[1].subpixel_factor = 10;
454  }
463 
469  opticflow[1].fast9_ret_corners = calloc(opticflow[0].fast9_rsize, sizeof(struct point_t));
470 
476 
477  opticflow[1].camera = &OPTICFLOW_CAMERA2;
478  opticflow[1].id = 1;
479 
482  };
483  float_rmat_of_eulers(&body_to_cam[1], &euler_cam2);
484 #endif
485 }
495  struct opticflow_result_t *result)
496 {
498  // Create the image buffers
501 
502  // Set the previous values
503  opticflow->got_first_img = false;
504 
505  // Init median filters with zeros
507  }
508 
509  // Convert image to grayscale
511 
512  if (!opticflow->got_first_img) {
514  opticflow->got_first_img = true;
515  return false;
516  }
517 
518  // variables for linear flow fit:
519  float error_threshold;
520  int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
521  struct linear_flow_fit_info fit_info;
522 
523  // Update FPS for information
524  float dt = timeval_diff(&(opticflow->prev_img_gray.ts), &(img->ts));
525  if (dt > 1e-5) {
526  result->fps = 1000.f / dt;
527  } else {
528  return false;
529  }
530 
531  // *************************************************************************************
532  // Corner detection
533  // *************************************************************************************
534 
535  // if feature_management is selected and tracked corners drop below a threshold, redetect
537  manage_flow_features(img, opticflow, result);
538  } else if (!opticflow->feature_management) {
539  // needs to be set to 0 because result is now static
540  result->corner_cnt = 0;
542  // FAST corner detection
543  // TODO: There is something wrong with fast9_detect destabilizing FPS. This problem is reduced with putting min_distance
544  // to 0 (see defines), however a more permanent solution should be considered
547  &opticflow->fast9_ret_corners, NULL);
548  } else if (opticflow->corner_method == ACT_FAST) {
549  // ACT-FAST corner detection:
554  }
555 
556  // Adaptive threshold
557  if (opticflow->fast9_adaptive) {
558 
559  // This works well for exhaustive FAST, but drives the threshold to the minimum for ACT-FAST:
560  // Decrease and increase the threshold based on previous values
561  if (result->corner_cnt < opticflow->max_track_corners / 2) {
562  // make detections easier:
565  }
566 
569  n_agents[opticflow->id]++;
570  }
571 
572  } else if (result->corner_cnt > opticflow->max_track_corners * 2) {
573 
576  }
577 
580  n_agents[opticflow->id]--;
581  }
582  }
583  }
584  }
585 
586 #if OPTICFLOW_SHOW_CORNERS
588 #endif
589 
590  // Check if we found some corners to track
591  if (result->corner_cnt < 1) {
592  // Clear the result otherwise the previous values will be returned for this frame too
593  VECT3_ASSIGN(result->vel_cam, 0, 0, 0);
594  VECT3_ASSIGN(result->vel_body, 0, 0, 0);
595  result->div_size = 0;
596  result->divergence = 0;
597  result->noise_measurement = 5.0;
598 
600  return false;
601  }
602 
603  // *************************************************************************************
604  // Corner Tracking
605  // *************************************************************************************
606 
607  // Execute a Lucas Kanade optical flow
608  result->tracked_cnt = result->corner_cnt;
609  uint8_t keep_bad_points = 0;
611  &result->tracked_cnt,
614 
615 
616  if (opticflow->track_back) {
617  // TODO: Watch out!
618  // We track the flow back and give badly back-tracked vectors a high error,
619  // but we do not yet remove these vectors, nor use the errors in any other function than showing the flow.
620 
621  // initialize corners at the tracked positions:
622  for (int i = 0; i < result->tracked_cnt; i++) {
623  opticflow->fast9_ret_corners[i].x = (uint32_t)(vectors[i].pos.x + vectors[i].flow_x) / opticflow->subpixel_factor;
624  opticflow->fast9_ret_corners[i].y = (uint32_t)(vectors[i].pos.y + vectors[i].flow_y) / opticflow->subpixel_factor;
625  }
626 
627  // present the images in the opposite order:
628  keep_bad_points = 1;
629  uint16_t back_track_cnt = result->tracked_cnt;
631  &back_track_cnt,
634 
635  // printf("Tracked %d points back.\n", back_track_cnt);
636  int32_t back_x, back_y, diff_x, diff_y, dist_squared;
637  int32_t back_track_threshold = 200;
638 
639  for (int i = 0; i < result->tracked_cnt; i++) {
640  if (back_vectors[i].error < LARGE_FLOW_ERROR) {
641  back_x = (int32_t)(back_vectors[i].pos.x + back_vectors[i].flow_x);
642  back_y = (int32_t)(back_vectors[i].pos.y + back_vectors[i].flow_y);
643  diff_x = back_x - vectors[i].pos.x;
644  diff_y = back_y - vectors[i].pos.y;
645  dist_squared = diff_x * diff_x + diff_y * diff_y;
646  // printf("Vector %d: x,y = %d, %d, back x, y = %d, %d, back tracking error %d\n", i, vectors[i].pos.x, vectors[i].pos.y, back_x, back_y, dist_squared);
647  if (dist_squared > back_track_threshold) {
648  vectors[i].error = LARGE_FLOW_ERROR;
649  }
650  } else {
651  vectors[i].error = LARGE_FLOW_ERROR;
652  }
653  }
654 
655  free(back_vectors);
656  }
657 
658  if (opticflow->show_flow) {
659  uint8_t color[4] = {0, 0, 0, 0};
660  uint8_t bad_color[4] = {0, 0, 0, 0};
661  image_show_flow_color(img, vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
662  }
663 
664  static int n_samples = 100;
665  // Estimate size divergence:
666  if (SIZE_DIV) {
667  result->div_size = get_size_divergence(vectors, result->tracked_cnt, n_samples);// * result->fps;
668  } else {
669  result->div_size = 0.0f;
670  }
671 
672  if (LINEAR_FIT) {
673  // Linear flow fit (normally derotation should be performed before):
674  error_threshold = 10.0f;
675  n_iterations_RANSAC = 20;
676  n_samples_RANSAC = 5;
677  success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
678  n_samples_RANSAC, img->w, img->h, &fit_info);
679 
680  if (!success_fit) {
681  fit_info.divergence = 0.0f;
682  fit_info.surface_roughness = 0.0f;
683  }
684 
685  result->divergence = fit_info.divergence;
686  result->surface_roughness = fit_info.surface_roughness;
687 
688  // Flow fit with rotations:
689  error_threshold = 10.0f;
690  n_iterations_RANSAC = 20;
691  n_samples_RANSAC = 5;
692  success_fit = analyze_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
693  n_samples_RANSAC, img->w, img->h, OPTICFLOW_CAMERA.camera_intrinsics.focal_x, &fit_info);
694 
695  } else {
696  result->divergence = 0.0f;
697  result->surface_roughness = 0.0f;
698  }
699 
700  // Get the median flow
701  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
702  if (result->tracked_cnt == 0) {
703  // We got no flow
704  result->flow_x = 0;
705  result->flow_y = 0;
706 
707  free(vectors);
709  return false;
710  } else if (result->tracked_cnt % 2) {
711  // Take the median point
712  result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
713  result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
714  } else {
715  // Take the average of the 2 median points
716  result->flow_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
717  result->flow_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
718  }
719 
720  // TODO scale flow to rad/s here
721 
722  // ***************
723  // Flow Derotation
724  // ***************
725 
726  float diff_flow_x = 0.f;
727  float diff_flow_y = 0.f;
728 
729  if (opticflow->derotation && result->tracked_cnt > 5) {
730 
731  float rotation_threshold = M_PI / 180.0f;
732  if (fabs(opticflow->img_gray.eulers.phi - opticflow->prev_img_gray.eulers.phi) > rotation_threshold
733  || fabs(opticflow->img_gray.eulers.theta - opticflow->prev_img_gray.eulers.theta) > rotation_threshold) {
734 
735  // do not apply the derotation if the rotation rates are too high:
736  result->flow_der_x = 0.0f;
737  result->flow_der_y = 0.0f;
738 
739  } else {
740 
741  // determine the roll, pitch, yaw differences between the images.
745 
746  if (strcmp(opticflow->camera->dev_name, bottom_camera.dev_name) == 0) {
747  // bottom cam: just subtract a scaled version of the roll and pitch difference from the global flow vector:
748  diff_flow_x = phi_diff *
749  opticflow->camera->camera_intrinsics.focal_x; // phi_diff works better than (cam_state->rates.p)
750  diff_flow_y = theta_diff * opticflow->camera->camera_intrinsics.focal_y;
751  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor *
753  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor *
755  } else {
756  // frontal cam, predict individual flow vectors:
757  struct flow_t *predicted_flow_vectors = predict_flow_vectors(vectors, result->tracked_cnt, phi_diff, theta_diff,
758  psi_diff, opticflow);
759  if (opticflow->show_flow) {
760  uint8_t color[4] = {255, 255, 255, 255};
761  uint8_t bad_color[4] = {255, 255, 255, 255};
762  image_show_flow_color(img, predicted_flow_vectors, result->tracked_cnt, opticflow->subpixel_factor, color, bad_color);
763  }
764 
765  for (int i = 0; i < result->tracked_cnt; i++) {
766  // subtract the flow:
767  vectors[i].flow_x -= predicted_flow_vectors[i].flow_x;
768  vectors[i].flow_y -= predicted_flow_vectors[i].flow_y;
769  }
770 
771  // vectors have to be re-sorted after derotation:
772  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
773 
774  if (result->tracked_cnt % 2) {
775  // Take the median point
776  result->flow_der_x = vectors[result->tracked_cnt / 2].flow_x;
777  result->flow_der_y = vectors[result->tracked_cnt / 2].flow_y;
778  } else {
779  // Take the average of the 2 median points
780  result->flow_der_x = (vectors[result->tracked_cnt / 2 - 1].flow_x + vectors[result->tracked_cnt / 2].flow_x) / 2.f;
781  result->flow_der_y = (vectors[result->tracked_cnt / 2 - 1].flow_y + vectors[result->tracked_cnt / 2].flow_y) / 2.f;
782  }
783  }
784  }
785  }
786  result->camera_id = opticflow->id;
787 
788  // Velocity calculation
789  // Right now this formula is under assumption that the flow only exist in the center axis of the camera.
790  // TODO: Calculate the velocity more sophisticated, taking into account the drone's angle and the slope of the ground plane.
791  // TODO: This is actually only correct for the bottom camera:
792  result->vel_cam.x = (float)result->flow_der_x * result->fps * agl_dist_value_filtered /
794  result->vel_cam.y = (float)result->flow_der_y * result->fps * agl_dist_value_filtered /
796  result->vel_cam.z = result->divergence * result->fps * agl_dist_value_filtered;
797 
798  //Apply a median filter to the velocity if wanted
799  if (opticflow->median_filter == true) {
801  }
802 
803  // Determine quality of noise measurement for state filter
804  //TODO develop a noise model based on groundtruth
805  //result->noise_measurement = 1 - (float)result->tracked_cnt / ((float)opticflow->max_track_corners * 1.25f);
806  result->noise_measurement = 0.25;
807 
808  // *************************************************************************************
809  // Next Loop Preparation
810  // *************************************************************************************
812  result->corner_cnt = result->tracked_cnt;
813  //get the new positions of the corners and the "residual" subpixel positions
814  for (uint16_t i = 0; i < result->tracked_cnt; i++) {
815  opticflow->fast9_ret_corners[i].x = (uint32_t)((vectors[i].pos.x + (float)vectors[i].flow_x) /
817  opticflow->fast9_ret_corners[i].y = (uint32_t)((vectors[i].pos.y + (float)vectors[i].flow_y) /
819  opticflow->fast9_ret_corners[i].x_sub = (uint16_t)((vectors[i].pos.x + vectors[i].flow_x) % opticflow->subpixel_factor);
820  opticflow->fast9_ret_corners[i].y_sub = (uint16_t)((vectors[i].pos.y + vectors[i].flow_y) % opticflow->subpixel_factor);
821  opticflow->fast9_ret_corners[i].count = vectors[i].pos.count;
822  }
823  }
824  free(vectors);
826  return true;
827 }
828 
829 /*
830  * Predict flow vectors by means of the rotation rates:
831  */
832 static struct flow_t *predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff,
833  float theta_diff, float psi_diff, struct opticflow_t *opticflow)
834 {
835 
836  // reserve memory for the predicted flow vectors:
837  struct flow_t *predicted_flow_vectors = malloc(sizeof(struct flow_t) * n_points);
838 
841  0.0f, 0.0f, 1.0f
842  };
843  // TODO: make an option to not do distortion / undistortion (Dhane_k = 1)
845 
846  float A, B, C; // as in Longuet-Higgins
847 
848  if (strcmp(opticflow->camera->dev_name, front_camera.dev_name) == 0) {
849  // specific for the x,y swapped Bebop 2 images:
850  A = -psi_diff;
851  B = theta_diff;
852  C = phi_diff;
853  } else {
854  A = theta_diff;
855  B = phi_diff;
856  C = psi_diff;
857  }
858 
859  float x_n, y_n;
860  float x_n_new, y_n_new, x_pix_new, y_pix_new;
861  float predicted_flow_x, predicted_flow_y;
862  for (uint16_t i = 0; i < n_points; i++) {
863  // the from-coordinate is always the same:
864  predicted_flow_vectors[i].pos.x = flow_vectors[i].pos.x;
865  predicted_flow_vectors[i].pos.y = flow_vectors[i].pos.y;
866 
867  bool success = distorted_pixels_to_normalized_coords((float)flow_vectors[i].pos.x / opticflow->subpixel_factor,
868  (float)flow_vectors[i].pos.y / opticflow->subpixel_factor, &x_n, &y_n, k, K);
869  if (success) {
870  // predict flow as in a linear pinhole camera model:
871  predicted_flow_x = A * x_n * y_n - B * x_n * x_n - B + C * y_n;
872  predicted_flow_y = -C * x_n + A + A * y_n * y_n - B * x_n * y_n;
873 
874  x_n_new = x_n + predicted_flow_x;
875  y_n_new = y_n + predicted_flow_y;
876 
877  success = normalized_coords_to_distorted_pixels(x_n_new, y_n_new, &x_pix_new, &y_pix_new, k, K);
878 
879  if (success) {
880  predicted_flow_vectors[i].flow_x = (int16_t)(x_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.x);
881  predicted_flow_vectors[i].flow_y = (int16_t)(y_pix_new * opticflow->subpixel_factor - (float)flow_vectors[i].pos.y);
882  predicted_flow_vectors[i].error = 0;
883  } else {
884  predicted_flow_vectors[i].flow_x = 0;
885  predicted_flow_vectors[i].flow_y = 0;
886  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
887  }
888  } else {
889  predicted_flow_vectors[i].flow_x = 0;
890  predicted_flow_vectors[i].flow_y = 0;
891  predicted_flow_vectors[i].error = LARGE_FLOW_ERROR;
892  }
893  }
894  return predicted_flow_vectors;
895 }
896 
897 
898 /* manage_flow_features - Update list of corners to be tracked by LK
899  * Remembers previous points and tries to find new points in less dense
900  * areas of the image first.
901  *
902  */
903 static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow, struct opticflow_result_t *result)
904 {
905  // first check if corners have not moved too close together due to flow:
906  int16_t c1 = 0;
907  while (c1 < (int16_t)result->corner_cnt - 1) {
908  bool exists = false;
909  for (int16_t i = c1 + 1; i < result->corner_cnt; i++) {
914  // if too close, replace the corner with the last one in the list:
920 
921  // decrease the number of corners:
922  result->corner_cnt--;
923  exists = true;
924  // no further checking required for the removed corner
925  break;
926  }
927  }
928  // if the corner has been replaced, the new corner in position c1 has to be checked again:
929  if (!exists) { c1++; }
930  }
931 
932  // no need for "per region" re-detection when there are no previous corners
933  if ((!opticflow->fast9_region_detect) || (result->corner_cnt == 0)) {
938  NULL);
939  } else {
940  // allocating memory and initializing the 2d array that holds the number of corners per region and its index (for the sorting)
941  uint16_t **region_count = calloc(opticflow->fast9_num_regions, sizeof(uint16_t *));
942  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
943  region_count[i] = calloc(2, sizeof(uint16_t));
944  region_count[i][0] = 0;
945  region_count[i][1] = i;
946  }
947  uint16_t root_regions = (uint16_t)sqrtf((float)opticflow->fast9_num_regions);
948  int region_index;
949  for (uint16_t i = 0; i < result->corner_cnt; i++) {
950  region_index = (opticflow->fast9_ret_corners[i].x * root_regions / img->w
951  + root_regions * (opticflow->fast9_ret_corners[i].y * root_regions / img->h));
952  region_index = (region_index < opticflow->fast9_num_regions) ? region_index : opticflow->fast9_num_regions - 1;
953  region_count[region_index][0]++;
954  }
955 
956  //sorting region_count array according to first column (number of corners).
957  qsort(region_count, opticflow->fast9_num_regions, sizeof(region_count[0]), cmp_array);
958 
959  uint16_t roi[4];
960  // Detecting corners from the region with the less to the one with the most, until a desired total is reached.
961  for (uint16_t i = 0; i < opticflow->fast9_num_regions && result->corner_cnt < 2 * opticflow->max_track_corners; i++) {
962  // Find the boundaries of the region of interest
963  roi[0] = (region_count[i][1] % root_regions) * (img->w / root_regions);
964  roi[1] = (region_count[i][1] / root_regions) * (img->h / root_regions);
965  roi[2] = roi[0] + (img->w / root_regions);
966  roi[3] = roi[1] + (img->h / root_regions);
967 
968  struct point_t *new_corners = calloc(opticflow->fast9_rsize, sizeof(struct point_t));
969  uint16_t new_count = 0;
970 
973  &opticflow->fast9_rsize, &new_corners, roi);
974 
975  // check that no identified points already exist in list
976  for (uint16_t j = 0; j < new_count; j++) {
977  bool exists = false;
978  for (uint16_t k = 0; k < result->corner_cnt; k++) {
979  if (abs((int16_t)new_corners[j].x - (int16_t)opticflow->fast9_ret_corners[k].x) < (int16_t)opticflow->fast9_min_distance
980  && abs((int16_t)new_corners[j].y - (int16_t)opticflow->fast9_ret_corners[k].y) < (int16_t)
982  exists = true;
983  break;
984  }
985  }
986  if (!exists) {
987  opticflow->fast9_ret_corners[result->corner_cnt].x = new_corners[j].x;
988  opticflow->fast9_ret_corners[result->corner_cnt].y = new_corners[j].y;
992  result->corner_cnt++;
993 
994  if (result->corner_cnt >= opticflow->fast9_rsize) {
995  break;
996  }
997  }
998  }
999 
1000  free(new_corners);
1001  }
1002  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
1003  free(region_count[i]);
1004  }
1005  free(region_count);
1006  }
1007 }
1008 
1018  struct opticflow_result_t *result)
1019 {
1020  // Define Static Variables
1021  static struct edge_hist_t edge_hist[MAX_HORIZON];
1022  static uint8_t current_frame_nr = 0;
1023  struct edge_flow_t edgeflow;
1024  static uint8_t previous_frame_offset[2] = {1, 1};
1025 
1026  // Define Normal variables
1027  struct edgeflow_displacement_t displacement;
1028  displacement.x = calloc(img->w, sizeof(int32_t));
1029  displacement.y = calloc(img->h, sizeof(int32_t));
1030  // If the methods just switched to this one, reintialize the
1031  // array of edge_hist structure.
1032  if (opticflow->just_switched_method == 1 && edge_hist[0].x == NULL) {
1033  int i;
1034  for (i = 0; i < MAX_HORIZON; i++) {
1035  edge_hist[i].x = calloc(img->w, sizeof(int32_t));
1036  edge_hist[i].y = calloc(img->h, sizeof(int32_t));
1037  FLOAT_EULERS_ZERO(edge_hist[i].eulers);
1038  }
1039  }
1040  uint16_t disp_range;
1042  disp_range = opticflow->search_distance;
1043  } else {
1044  disp_range = DISP_RANGE_MAX;
1045  }
1046 
1047  uint16_t window_size;
1048 
1050  window_size = opticflow->window_size;
1051  } else {
1052  window_size = MAX_WINDOW_SIZE;
1053  }
1054 
1056 
1057  //......................Calculating EdgeFlow..................... //
1058 
1059  // Calculate current frame's edge histogram
1060  int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
1061  int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
1062  calculate_edge_histogram(img, edge_hist_x, 'x', 0);
1063  calculate_edge_histogram(img, edge_hist_y, 'y', 0);
1064 
1065 
1066  // Copy frame time and angles of image to calculated edge histogram
1067  edge_hist[current_frame_nr].frame_time = img->ts;
1068  edge_hist[current_frame_nr].eulers = img->eulers;
1069 
1070  // Calculate which previous edge_hist to compare with the current
1071  uint8_t previous_frame_nr[2];
1072  calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);
1073 
1074  //Select edge histogram from the previous frame nr
1075  int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
1076  int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;
1077 
1078  //Calculate the corresponding derotation of the two frames
1079  int16_t der_shift_x = 0;
1080  int16_t der_shift_y = 0;
1081  if (opticflow->derotation) {
1082  der_shift_x = (int16_t)((edge_hist[current_frame_nr].eulers.phi - edge_hist[previous_frame_nr[0]].eulers.phi) *
1084  der_shift_y = (int16_t)((edge_hist[current_frame_nr].eulers.theta - edge_hist[previous_frame_nr[1]].eulers.theta) *
1086  }
1087 
1088  // Estimate pixel wise displacement of the edge histograms for x and y direction
1089  calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
1090  displacement.x, img->w,
1091  window_size, disp_range, der_shift_x);
1092  calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
1093  displacement.y, img->h,
1094  window_size, disp_range, der_shift_y);
1095 
1096  // Fit a line on the pixel displacement to estimate
1097  // the global pixel flow and divergence (RES is resolution)
1098  line_fit(displacement.x, &edgeflow.div_x,
1099  &edgeflow.flow_x, img->w,
1100  window_size + disp_range, RES);
1101  line_fit(displacement.y, &edgeflow.div_y,
1102  &edgeflow.flow_y, img->h,
1103  window_size + disp_range, RES);
1104 
1105  /* Save Resulting flow in results
1106  * Warning: The flow detected here is different in sign
1107  * and size, therefore this will be divided with
1108  * the same subpixel factor and multiplied by -1 to make it
1109  * on par with the LK algorithm in opticalflow_calculator.c
1110  * */
1111  edgeflow.flow_x = -1 * edgeflow.flow_x;
1112  edgeflow.flow_y = -1 * edgeflow.flow_y;
1113 
1114  edgeflow.flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
1115  edgeflow.flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];
1116 
1117  result->flow_x = (int16_t)edgeflow.flow_x / RES;
1118  result->flow_y = (int16_t)edgeflow.flow_y / RES;
1119 
1120  //Fill up the results optic flow to be on par with LK_fast9
1121  result->flow_der_x = result->flow_x;
1122  result->flow_der_y = result->flow_y;
1123  result->corner_cnt = getAmountPeaks(edge_hist_x, 500, img->w);
1124  result->tracked_cnt = getAmountPeaks(edge_hist_x, 500, img->w);
1125  result->divergence = -1.0 * (float)edgeflow.div_x /
1126  RES; // Also multiply the divergence with -1.0 to make it on par with the LK algorithm of
1127  result->div_size =
1128  result->divergence; // Fill the div_size with the divergence to atleast get some divergenge measurement when switching from LK to EF
1129  result->camera_id = opticflow->id;
1130  result->surface_roughness = 0.0f;
1131  //......................Calculating VELOCITY ..................... //
1132 
1133  /*Estimate fps per direction
1134  * This is the fps with adaptive horizon for subpixel flow, which is not similar
1135  * to the loop speed of the algorithm. The faster the quadcopter flies
1136  * the higher it becomes
1137  */
1138  float fps_x = 0;
1139  float fps_y = 0;
1140  float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
1141  float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
1142  fps_x = 1 / (time_diff_x);
1143  fps_y = 1 / (time_diff_y);
1144 
1145  result->fps = fps_x;
1146 
1147  // TODO scale flow to rad/s here
1148 
1149  // Calculate velocity
1151  RES;
1153  RES;
1154  result->vel_cam.z = result->divergence * fps_x * agl_dist_value_filtered;
1155 
1156  //Apply a median filter to the velocity if wanted
1157  if (opticflow->median_filter == true) {
1159  }
1160 
1161  result->noise_measurement = 0.2;
1162  if (opticflow->show_flow) {
1163  draw_edgeflow_img(img, edgeflow, prev_edge_histogram_x, edge_hist_x);
1164  }
1165  // Increment and wrap current time frame
1166  current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
1167  // Free alloc'd variables
1168  free(displacement.x);
1169  free(displacement.y);
1170  return true;
1171 }
1172 
1173 
1182  struct opticflow_result_t *result)
1183 {
1184  bool flow_successful = false;
1185  // A switch counter that checks in the loop if the current method is similar,
1186  // to the previous (for reinitializing structs)
1187  static int8_t switch_counter[2] = {-1, -1};
1188  if (switch_counter[opticflow->id] != opticflow->method) {
1190  switch_counter[opticflow->id] = opticflow->method;
1191  // Clear the static result
1192  memset(result, 0, sizeof(struct opticflow_result_t));
1193  } else {
1195  }
1196 
1197  // Switch between methods (0 = fast9/lukas-kanade, 1 = EdgeFlow)
1198  if (opticflow->method == 0) {
1199  flow_successful = calc_fast9_lukas_kanade(opticflow, img, result);
1200  } else if (opticflow->method == 1) {
1201  flow_successful = calc_edgeflow_tot(opticflow, img, result);
1202  }
1203  /* Rotate velocities from camera frame coordinates to body coordinates for control
1204  * IMPORTANT!!! This frame to body orientation should be the case for the Parrot
1205  * ARdrone and Bebop, however this can be different for other quadcopters
1206  * ALWAYS double check!
1207  */
1209 
1210  return flow_successful;
1211 }
1212 
/**
 * Compute the elapsed time between two timestamps in milliseconds.
 *
 * @param starttime  the earlier timestamp
 * @param finishtime the later timestamp
 * @return elapsed time in milliseconds (truncated toward zero, not rounded)
 */
static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
{
  /* Seconds and microseconds contribute independently; the signed
   * intermediate arithmetic yields the same modulo-2^32 result as
   * accumulating the two terms separately. */
  uint32_t elapsed_msec = (uint32_t)((finishtime->tv_sec - starttime->tv_sec) * 1000
                                     + (finishtime->tv_usec - starttime->tv_usec) / 1000);
  return elapsed_msec;
}
1226 
1234 static int cmp_flow(const void *a, const void *b)
1235 {
1236  const struct flow_t *a_p = (const struct flow_t *)a;
1237  const struct flow_t *b_p = (const struct flow_t *)b;
1238  return (a_p->flow_x * a_p->flow_x + a_p->flow_y * a_p->flow_y) - (b_p->flow_x * b_p->flow_x + b_p->flow_y *
1239  b_p->flow_y);
1240 }
1241 
/**
 * Compare the rows of a uint16_t 2D array based on the first column,
 * for use with qsort().
 *
 * @param a pointer to the first row (array of uint16_t)
 * @param b pointer to the second row (array of uint16_t)
 * @return <0, 0, or >0 per the usual qsort comparator contract
 */
static int cmp_array(const void *a, const void *b)
{
  uint16_t lhs = ((const uint16_t *)a)[0];
  uint16_t rhs = ((const uint16_t *)b)[0];
  /* uint16_t promotes to int, so this subtraction cannot overflow. */
  return (int)lhs - (int)rhs;
}
void act_fast(struct image_t *img, uint8_t fast_threshold, uint16_t *num_corners, struct point_t **ret_corners, uint16_t n_agents, uint16_t n_time_steps, float long_step, float short_step, int min_gradient, int gradient_method, int camera_id)
Do an ACT-FAST corner detection.
Definition: act_fast.c:58
Finds corners in an image by actively scanning the image.
float agl_dist_value_filtered
Definition: agl_dist.c:35
Bind to agl ABI message and provide a filtered value to be used in flight plans.
#define B
static uint16_t c1
Definition: baro_MS5534A.c:203
int n_samples
Definition: detect_gate.c:85
#define RES
Definition: detect_window.c:27
uint32_t getAmountPeaks(int32_t *edgehist, int32_t thres, int32_t size)
getAmountPeaks, calculates the amount of peaks in a edge histogram
Definition: edge_flow.c:337
void calculate_edge_displacement(int32_t *edge_histogram, int32_t *edge_histogram_prev, int32_t *displacement, uint16_t size, uint8_t window, uint8_t disp_range, int32_t der_shift)
Calculate_displacement calculates the displacement between two histograms.
Definition: edge_flow.c:168
void draw_edgeflow_img(struct image_t *img, struct edge_flow_t edgeflow, int32_t *edge_hist_x_prev, int32_t *edge_hist_x)
Draws edgehistogram, displacement and linefit directly on the image for debugging (only for edgeflow ...
Definition: edge_flow.c:297
void calculate_edge_histogram(struct image_t *img, int32_t edge_histogram[], char direction, uint16_t edge_threshold)
Calculate a edge/gradient histogram for each dimension of the image.
Definition: edge_flow.c:88
void line_fit(int32_t *displacement, int32_t *divergence, int32_t *flow, uint32_t size, uint32_t border, uint16_t RES)
Fits a linear model to an array with pixel displacements with least squares.
Definition: edge_flow.c:247
void calc_previous_frame_nr(struct opticflow_result_t *result, struct opticflow_t *opticflow, uint8_t current_frame_nr, uint8_t *previous_frame_offset, uint8_t *previous_frame_nr)
Calc_previous_frame_nr; adaptive Time Horizon.
Definition: edge_flow.c:39
calculate optical flow with EdgeFlow
#define DISP_RANGE_MAX
Definition: edge_flow.h:53
struct FloatEulers eulers
Definition: edge_flow.h:69
#define MAX_WINDOW_SIZE
Definition: edge_flow.h:56
int32_t * x
Definition: edge_flow.h:66
struct timeval frame_time
Definition: edge_flow.h:68
int32_t div_x
Definition: edge_flow.h:79
int32_t * y
Definition: edge_flow.h:67
int32_t flow_y
Definition: edge_flow.h:80
#define MAX_HORIZON
Definition: edge_flow.h:46
int32_t div_y
Definition: edge_flow.h:81
int32_t flow_x
Definition: edge_flow.h:78
void fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint16_t x_padding, uint16_t y_padding, uint16_t *num_corners, uint16_t *ret_corners_length, struct point_t **ret_corners, uint16_t *roi)
Do a FAST9 corner detection.
Definition: fast_rosten.c:52
float phi
in radians
float theta
in radians
float psi
in radians
#define FLOAT_EULERS_ZERO(_e)
#define float_rmat_of_eulers
void float_rmat_transp_vmult(struct FloatVect3 *vb, struct FloatRMat *m_b2a, struct FloatVect3 *va)
rotate 3D vector by transposed rotation matrix.
euler angles
rotation matrix
#define VECT3_ASSIGN(_a, _x, _y, _z)
Definition: pprz_algebra.h:125
#define A
void image_copy(struct image_t *input, struct image_t *output)
Copy an image from inut to output This will only work if the formats are the same.
Definition: image.c:89
void image_switch(struct image_t *a, struct image_t *b)
This will switch image *a and *b This is faster as image_copy because it doesn't copy the whole image...
Definition: image.c:112
void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt)
Show points in an image by coloring them through giving the pixels the maximum value.
Definition: image.c:679
void image_to_grayscale(struct image_t *input, struct image_t *output)
Convert an image to grayscale.
Definition: image.c:131
void image_show_flow_color(struct image_t *img, struct flow_t *vectors, uint16_t points_cnt, uint8_t subpixel_factor, const uint8_t *color, const uint8_t *bad_color)
Shows the flow from a specific point to a new point This works on YUV422 and Grayscale images.
Definition: image.c:743
void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type)
Create a new image.
Definition: image.c:43
Image helper functions like resizing, color filter, converters...
struct timeval ts
The timestamp of creation.
Definition: image.h:48
uint16_t x_sub
The x subpixel coordinate of the point.
Definition: image.h:62
uint32_t x
The x coordinate of the point.
Definition: image.h:59
uint32_t y
The y coordinate of the point.
Definition: image.h:60
uint16_t h
Image height.
Definition: image.h:47
uint16_t w
Image width.
Definition: image.h:46
uint16_t count
Number of times the point has been tracked successfully.
Definition: image.h:61
struct point_t pos
The original position the flow comes from in subpixels.
Definition: image.h:79
struct FloatEulers eulers
Euler Angles at time of image.
Definition: image.h:49
uint32_t error
The matching error in the tracking process in subpixels.
Definition: image.h:82
int32_t flow_y
The y direction flow in subpixels.
Definition: image.h:81
uint16_t y_sub
The y subpixel coordinate of the point.
Definition: image.h:63
int32_t flow_x
The x direction flow in subpixels.
Definition: image.h:80
@ IMAGE_GRAYSCALE
Grayscale image with only the Y part (uint8 per pixel)
Definition: image.h:37
Definition: image.h:78
Definition: image.h:44
Definition: image.h:58
int16_t flow_der_x
The derotated flow calculation in the x direction (in subpixels)
float div_size
Divergence as determined with the size_divergence script.
float surface_roughness
Surface roughness as determined with a linear optical flow fit.
uint16_t tracked_cnt
The amount of tracked corners.
float divergence
Divergence as determined with a linear flow fit.
uint16_t corner_cnt
The amount of coners found by FAST9.
float fps
Frames per second of the optical flow calculation.
float noise_measurement
noise of measurement, for state filter
struct FloatVect3 vel_body
The velocity in body frame (m/s) with X positive to the front of the aircraft, Y positive to the righ...
int16_t flow_x
Flow in x direction from the camera (in subpixels) with X positive to the right.
uint8_t camera_id
Camera id as passed to cv_add_to_device.
struct FloatVect3 vel_cam
The velocity in camera frame (m/s)
int16_t flow_y
Flow in y direction from the camera (in subpixels) with Y positive to the bottom.
int16_t flow_der_y
The derotated flow calculation in the y direction (in subpixels)
bool analyze_flow_field(struct flow_t *vectors, int count, float error_threshold, int n_iterations, int n_samples, int im_width, int im_height, float focal_length, struct linear_flow_fit_info *info)
Analyze a flow field, retrieving information on relative velocities and rotations,...
bool analyze_linear_flow_field(struct flow_t *vectors, int count, float error_threshold, int n_iterations, int n_samples, int im_width, int im_height, struct linear_flow_fit_info *info)
Analyze a linear flow field, retrieving information such as divergence, surface roughness,...
float divergence
Basically, relative_velocity_z. Actual divergence of a 2D flow field is 2 * relative_velocity_z.
float surface_roughness
The error of the linear fit is a measure of surface roughness.
struct flow_t * opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt, uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint8_t max_points, uint8_t pyramid_level, uint8_t keep_bad_points)
Definition: lucas_kanade.c:74
efficient fixed-point optical-flow calculation
#define LARGE_FLOW_ERROR
Definition: lucas_kanade.h:37
#define InitMedianFilterVect3Float(_f, _n)
#define UpdateMedianFilterVect3Float(_f, _v)
#define MEDIAN_DEFAULT_SIZE
Definition: median_filter.h:28
#define front_camera
Definition: mt9f002_nps.c:3
#define bottom_camera
Definition: mt9v117_nps.c:3
PRINT_CONFIG_VAR(ONELOOP_ANDI_FILT_CUTOFF)
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X_CAMERA2
#define OPTICFLOW_MEDIAN_FILTER
#define OPTICFLOW_BODY_TO_CAM_THETA_CAMERA2
#define OPTICFLOW_FAST9_ADAPTIVE_CAMERA2
#define OPTICFLOW_ACTFAST_LONG_STEP
static void manage_flow_features(struct image_t *img, struct opticflow_t *opticflow, struct opticflow_result_t *result)
#define OPTICFLOW_SUBPIXEL_FACTOR_CAMERA2
#define OPTICFLOW_FEATURE_MANAGEMENT_CAMERA2
#define OPTICFLOW_MEDIAN_FILTER_CAMERA2
#define OPTICFLOW_FAST9_NUM_REGIONS_CAMERA2
#define OPTICFLOW_SHOW_FLOW
#define OPTICFLOW_ACTFAST_SHORT_STEP
#define OPTICFLOW_THRESHOLD_VEC
#define OPTICFLOW_PYRAMID_LEVEL_CAMERA2
struct FloatRMat body_to_cam[2]
#define ACT_FAST
#define OPTICFLOW_SUBPIXEL_FACTOR
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X
static struct flow_t * predict_flow_vectors(struct flow_t *flow_vectors, uint16_t n_points, float phi_diff, float theta_diff, float psi_diff, struct opticflow_t *opticflow)
#define OPTICFLOW_ACTFAST_LONG_STEP_CAMERA2
static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
Calculate the difference from start till finish.
#define OPTICFLOW_FAST9_MIN_DISTANCE
bool calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct image_t *img, struct opticflow_result_t *result)
Run the optical flow with fast9 and lukaskanade on a new image frame.
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y_CAMERA2
#define OPTICFLOW_MAX_ITERATIONS
#define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y
uint16_t n_agents[2]
#define OPTICFLOW_ACTFAST_GRADIENT_METHOD
#define OPTICFLOW_SEARCH_DISTANCE_CAMERA2
#define OPTICFLOW_THRESHOLD_VEC_CAMERA2
#define OPTICFLOW_ACTFAST_MIN_GRADIENT_CAMERA2
#define OPTICFLOW_BODY_TO_CAM_PSI
#define OPTICFLOW_FAST9_THRESHOLD_CAMERA2
#define OPTICFLOW_CORNER_METHOD
#define OPTICFLOW_RESOLUTION_FACTOR
static int cmp_array(const void *a, const void *b)
Compare the rows of an integer (uint16_t) 2D array based on the first column.
#define OPTICFLOW_FAST9_MIN_DISTANCE_CAMERA2
#define OPTICFLOW_FEATURE_MANAGEMENT
#define OPTICFLOW_BODY_TO_CAM_PSI_CAMERA2
#define LINEAR_FIT
static int cmp_flow(const void *a, const void *b)
Compare two flow vectors based on flow distance Used for sorting.
#define OPTICFLOW_WINDOW_SIZE
#define OPTICFLOW_METHOD_CAMERA2
#define OPTICFLOW_TRACK_BACK_CAMERA2
void opticflow_calc_init(struct opticflow_t opticflow[])
Initialize the opticflow calculator.
#define OPTICFLOW_MAX_ITERATIONS_CAMERA2
#define OPTICFLOW_SHOW_FLOW_CAMERA2
#define OPTICFLOW_PYRAMID_LEVEL
bool opticflow_calc_frame(struct opticflow_t *opticflow, struct image_t *img, struct opticflow_result_t *result)
Run the optical flow on a new image frame.
#define OPTICFLOW_TRACK_BACK
uint16_t n_time_steps[2]
#define EXHAUSTIVE_FAST
#define OPTICFLOW_ACTFAST_GRADIENT_METHOD_CAMERA2
struct MedianFilter3Float vel_filt
#define OPTICFLOW_ACTFAST_MIN_GRADIENT
#define FAST9_LOW_THRESHOLD
#define OPTICFLOW_FAST9_NUM_REGIONS
#define OPTICFLOW_MAX_TRACK_CORNERS
#define OPTICFLOW_FAST9_PADDING
#define OPTICFLOW_SEARCH_DISTANCE
#define OPTICFLOW_FAST9_PADDING_CAMERA2
#define OPTICFLOW_BODY_TO_CAM_THETA
#define OPTICFLOW_FAST9_REGION_DETECT
bool calc_edgeflow_tot(struct opticflow_t *opticflow, struct image_t *img, struct opticflow_result_t *result)
Run the optical flow with EDGEFLOW on a new image frame.
#define FAST9_HIGH_THRESHOLD
#define OPTICFLOW_DEROTATION
#define OPTICFLOW_ACTFAST_SHORT_STEP_CAMERA2
#define OPTICFLOW_BODY_TO_CAM_PHI_CAMERA2
#define OPTICFLOW_FAST9_REGION_DETECT_CAMERA2
#define OPTICFLOW_METHOD
#define OPTICFLOW_RESOLUTION_FACTOR_CAMERA2
#define SIZE_DIV
#define OPTICFLOW_BODY_TO_CAM_PHI
#define OPTICFLOW_MAX_TRACK_CORNERS_CAMERA2
#define OPTICFLOW_FAST9_THRESHOLD
#define OPTICFLOW_DEROTATION_CAMERA2
#define OPTICFLOW_WINDOW_SIZE_CAMERA2
#define OPTICFLOW_FAST9_ADAPTIVE
#define OPTICFLOW_CORNER_METHOD_CAMERA2
Calculate velocity from optic flow.
struct image_t prev_img_gray
Previous gray image frame.
bool track_back
Whether to track flow vectors back to the previous image, in order to check if the back-tracked flow ...
uint16_t fast9_min_distance
Minimum distance in pixels between corners.
float derotation_correction_factor_x
Correction factor for derotation in x axis, determined from a fit from the gyros and flow rotation....
uint16_t search_distance
Search distance for blockmatching alg.
uint16_t max_track_corners
Maximum amount of corners Lucas Kanade should track.
struct image_t img_gray
Current gray image frame.
uint8_t corner_method
Method to use for determining where the corners are.
bool got_first_img
If we got a image to work with.
struct point_t * fast9_ret_corners
Corners.
bool derotation
Derotation switched on or off (depended on the quality of the gyroscope measurement)
const struct video_config_t * camera
uint16_t resolution_factor
The resolution in EdgeFlow to determine the Divergence.
uint8_t max_iterations
The maximum amount of iterations the Lucas Kanade algorithm should do.
int actfast_gradient_method
Whether to use a simple or Sobel filter.
uint16_t subpixel_factor
The amount of subpixels per pixel.
float actfast_long_step
Step size to take when there is no texture.
bool show_flow
Whether to draw the flow vectors on the image. Watch out! This changes the image as will be received ...
bool fast9_adaptive
Whether the FAST9 threshold should be adaptive.
uint8_t fast9_threshold
FAST9 corner detection threshold.
int actfast_min_gradient
Threshold that decides when there is sufficient texture for edge following.
bool median_filter
Decides to use a median filter on the velocity.
uint8_t threshold_vec
The threshold in x, y subpixels which the algorithm should stop.
uint16_t window_size
Window size for the blockmatching algorithm (general value for all methods)
uint16_t fast9_padding
Padding used in FAST9 detector.
bool just_switched_method
Boolean to check if methods has been switched (for reinitialization)
bool feature_management
Decides whether to keep track corners in memory for the next frame instead of re-detecting every time...
bool fast9_region_detect
Decides whether to detect fast9 corners in specific regions of interest or the whole image (only for ...
uint8_t pyramid_level
Number of pyramid levels used in Lucas Kanade algorithm (0 == no pyramids used)
uint16_t fast9_rsize
Amount of corners allocated.
float derotation_correction_factor_y
Correction factor for derotation in Y axis, determined from a fit from the gyros and flow rotation....
float actfast_short_step
Step size to take when there is an edge to be followed.
uint8_t method
Method to use to calculate the optical flow.
#define FAST9_MAX_CORNERS
uint8_t fast9_num_regions
The number of regions of interest the image is split into.
struct opticflow_t opticflow[ACTIVE_CAMERAS]
Opticflow calculations.
float get_size_divergence(struct flow_t *vectors, int count, int n_samples)
Get divergence from optical flow vectors based on line sizes between corners.
Calculate divergence from flow vectors by looking at line sizes beteween the points.
static float K[9]
bool distorted_pixels_to_normalized_coords(float x_pd, float y_pd, float *x_n, float *y_n, float k, const float *K)
Transform distorted pixel coordinates to normalized coordinates.
Definition: undistortion.c:128
bool normalized_coords_to_distorted_pixels(float x_n, float y_n, float *x_pd, float *y_pd, float k, const float *K)
Transform normalized coordinates to distorted pixel coordinates.
Definition: undistortion.c:146
Functions for undistorting camera images.
float focal_y
focal length in the y-direction in pixels
Definition: video_device.h:48
float center_x
center image coordinate in the x-direction
Definition: video_device.h:49
float focal_x
focal length in the x-direction in pixels
Definition: video_device.h:47
struct camera_intrinsics_t camera_intrinsics
Intrinsics of the camera; camera calibration parameters and distortion parameter(s)
Definition: video_device.h:68
float Dhane_k
(un)distortion parameter for a fish-eye lens
Definition: video_device.h:51
float center_y
center image coordinate in the y-direction
Definition: video_device.h:50
char * dev_name
path to device
Definition: video_device.h:59
unsigned short uint16_t
Typedef defining 16 bit unsigned short type.
Definition: vl53l1_types.h:88
int int32_t
Typedef defining 32 bit int type.
Definition: vl53l1_types.h:83
unsigned int uint32_t
Typedef defining 32 bit unsigned int type.
Definition: vl53l1_types.h:78
short int16_t
Typedef defining 16 bit short type.
Definition: vl53l1_types.h:93
unsigned char uint8_t
Typedef defining 8 bit unsigned char type.
Definition: vl53l1_types.h:98
signed char int8_t
Typedef defining 8 bit char type.
Definition: vl53l1_types.h:103
float b
Definition: wedgebug.c:202