opticflow_calculator.c
1 /*
2  * Copyright (C) 2014 Hann Woei Ho
3  * 2015 Freek van Tienen <freek.v.tienen@gmail.com>
4  * 2016 Kimberly McGuire <k.n.mcguire@tudelft.nl>
5  *
6  * This file is part of Paparazzi.
7  *
8  * Paparazzi is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * Paparazzi is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with Paparazzi; see the file COPYING. If not, see
20  * <http://www.gnu.org/licenses/>.
21  */
22 
30 #include "std.h"
31 
32 #include <stdio.h>
33 #include <string.h>
34 #include <stdlib.h>
35 
36 // Own Header
37 #include "opticflow_calculator.h"
38 
39 // Computer Vision
40 #include "lib/vision/image.h"
41 #include "lib/vision/lucas_kanade.h"
42 #include "lib/vision/fast_rosten.h"
43 #include "lib/vision/edge_flow.h"
44 #include "size_divergence.h"
45 #include "linear_flow_fit.h"
46 
47 // Kalman filter
49 #include "subsystems/imu.h"
50 
51 
52 // whether to show the flow and corners:
53 #define OPTICFLOW_SHOW_FLOW 0
54 #define OPTICFLOW_SHOW_CORNERS 0
55 
56 // What methods are run to determine divergence, lateral flow, etc.
57 // SIZE_DIV looks at line sizes and only calculates divergence
58 #define SIZE_DIV 1
59 // LINEAR_FIT makes a linear optical flow field fit and extracts a lot of information:
60 // relative velocities in x, y, z (divergence / time to contact), the slope of the surface, and the surface roughness.
61 #define LINEAR_FIT 1
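// Editorial note (illustrative, not from the original source): the divergence produced by these
// methods is roughly the relative vertical velocity v_z / h, so the time to contact can be
// approximated as ttc ~= 1 / divergence (e.g. a divergence of 0.5 /s gives ttc ~= 2 s).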
62 
63 // Camera parameters (defaults are from an ARDrone 2)
64 #ifndef OPTICFLOW_FOV_W
65 #define OPTICFLOW_FOV_W 0.89360857702
66 #endif
67 PRINT_CONFIG_VAR(OPTICFLOW_FOV_W)
68 
69 #ifndef OPTICFLOW_FOV_H
70 #define OPTICFLOW_FOV_H 0.67020643276
71 #endif
72 PRINT_CONFIG_VAR(OPTICFLOW_FOV_H)
73 
74 #ifndef OPTICFLOW_FX
75 #define OPTICFLOW_FX 343.1211
76 #endif
77 PRINT_CONFIG_VAR(OPTICFLOW_FX)
78 
79 #ifndef OPTICFLOW_FY
80 #define OPTICFLOW_FY 348.5053
81 #endif
82 PRINT_CONFIG_VAR(OPTICFLOW_FY)
83 
84 /* Set the default values */
85 #ifndef OPTICFLOW_MAX_TRACK_CORNERS
86 #define OPTICFLOW_MAX_TRACK_CORNERS 25
87 #endif
88 PRINT_CONFIG_VAR(OPTICFLOW_MAX_TRACK_CORNERS)
89 
90 #ifndef OPTICFLOW_WINDOW_SIZE
91 #define OPTICFLOW_WINDOW_SIZE 10
92 #endif
93 PRINT_CONFIG_VAR(OPTICFLOW_WINDOW_SIZE)
94 
95 #ifndef OPTICFLOW_SEARCH_DISTANCE
96 #define OPTICFLOW_SEARCH_DISTANCE 20
97 #endif
98 PRINT_CONFIG_VAR(OPTICFLOW_SEARCH_DISTANCE)
99 
100 #ifndef OPTICFLOW_SUBPIXEL_FACTOR
101 #define OPTICFLOW_SUBPIXEL_FACTOR 10
102 #endif
103 PRINT_CONFIG_VAR(OPTICFLOW_SUBPIXEL_FACTOR)
104 
105 #ifndef OPTICFLOW_RESOLUTION_FACTOR
106 #define OPTICFLOW_RESOLUTION_FACTOR 100
107 #endif
108 PRINT_CONFIG_VAR(OPTICFLOW_RESOLUTION_FACTOR)
109 
110 #ifndef OPTICFLOW_MAX_ITERATIONS
111 #define OPTICFLOW_MAX_ITERATIONS 10
112 #endif
113 PRINT_CONFIG_VAR(OPTICFLOW_MAX_ITERATIONS)
114 
115 #ifndef OPTICFLOW_THRESHOLD_VEC
116 #define OPTICFLOW_THRESHOLD_VEC 2
117 #endif
118 PRINT_CONFIG_VAR(OPTICFLOW_THRESHOLD_VEC)
119 
120 #ifndef OPTICFLOW_PYRAMID_LEVEL
121 #define OPTICFLOW_PYRAMID_LEVEL 2
122 #endif
123 PRINT_CONFIG_VAR(OPTICFLOW_PYRAMID_LEVEL)
124 
125 #ifndef OPTICFLOW_FAST9_ADAPTIVE
126 #define OPTICFLOW_FAST9_ADAPTIVE TRUE
127 #endif
128 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_ADAPTIVE)
129 
130 #ifndef OPTICFLOW_FAST9_THRESHOLD
131 #define OPTICFLOW_FAST9_THRESHOLD 20
132 #endif
133 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_THRESHOLD)
134 
135 #ifndef OPTICFLOW_FAST9_MIN_DISTANCE
136 #define OPTICFLOW_FAST9_MIN_DISTANCE 10
137 #endif
138 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_MIN_DISTANCE)
139 
140 #ifndef OPTICFLOW_FAST9_PADDING
141 #define OPTICFLOW_FAST9_PADDING 20
142 #endif
143 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_PADDING)
144 
145 // FAST9 thresholds that are currently not settable from the GCS:
146 #define FAST9_LOW_THRESHOLD 5
147 #define FAST9_HIGH_THRESHOLD 60
148 
149 #ifndef OPTICFLOW_METHOD
150 #define OPTICFLOW_METHOD 0
151 #endif
152 PRINT_CONFIG_VAR(OPTICFLOW_METHOD)
153 
154 #if OPTICFLOW_METHOD > 1
155 #error WARNING: Neither Lucas-Kanade nor EdgeFlow is selected (OPTICFLOW_METHOD must be 0 or 1)
156 #endif
157 
158 #ifndef OPTICFLOW_DEROTATION
159 #define OPTICFLOW_DEROTATION TRUE
160 #endif
161 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION)
162 
163 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X
164 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X 1.0
165 #endif
166 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION_CORRECTION_FACTOR_X)
167 
168 #ifndef OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y
169 #define OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y 1.0
170 #endif
171 PRINT_CONFIG_VAR(OPTICFLOW_DEROTATION_CORRECTION_FACTOR_Y)
172 
173 #ifndef OPTICFLOW_MEDIAN_FILTER
174 #define OPTICFLOW_MEDIAN_FILTER FALSE
175 #endif
176 PRINT_CONFIG_VAR(OPTICFLOW_MEDIAN_FILTER)
177 
178 #ifndef OPTICFLOW_KALMAN_FILTER
179 #define OPTICFLOW_KALMAN_FILTER TRUE
180 #endif
181 PRINT_CONFIG_VAR(OPTICFLOW_KALMAN_FILTER)
182 
183 #ifndef OPTICFLOW_KALMAN_FILTER_PROCESS_NOISE
184 #define OPTICFLOW_KALMAN_FILTER_PROCESS_NOISE 0.01
185 #endif
186 PRINT_CONFIG_VAR(OPTICFLOW_KALMAN_FILTER_PROCESS_NOISE)
187 
188 #ifndef OPTICFLOW_FEATURE_MANAGEMENT
189 #define OPTICFLOW_FEATURE_MANAGEMENT 1
190 #endif
191 PRINT_CONFIG_VAR(OPTICFLOW_FEATURE_MANAGEMENT)
192 
193 #ifndef OPTICFLOW_FAST9_REGION_DETECT
194 #define OPTICFLOW_FAST9_REGION_DETECT 1
195 #endif
196 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_REGION_DETECT)
197 
198 #ifndef OPTICFLOW_FAST9_NUM_REGIONS
199 #define OPTICFLOW_FAST9_NUM_REGIONS 9
200 #endif
201 PRINT_CONFIG_VAR(OPTICFLOW_FAST9_NUM_REGIONS)
202 
203 //Include median filter
204 #include "filters/median_filter.h"
205 struct MedianFilterInt vel_x_filt, vel_y_filt;
206 
207 
208 /* Functions only used here */
209 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime);
210 static int cmp_flow(const void *a, const void *b);
211 static int cmp_array(const void *a, const void *b);
212 
213 
214 
219 void opticflow_calc_init(struct opticflow_t *opticflow)
220 {
221  /* Set the default values */
222  opticflow->method = OPTICFLOW_METHOD; //0 = LK_fast9, 1 = Edgeflow
223  opticflow->window_size = OPTICFLOW_WINDOW_SIZE;
225  opticflow->derotation = OPTICFLOW_DEROTATION; //0 = OFF, 1 = ON
228 
241 
246  opticflow->fast9_rsize = 512;
247  opticflow->fast9_ret_corners = malloc(sizeof(struct point_t) * opticflow->fast9_rsize);
248 }
256 void calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct opticflow_state_t *cam_state, struct image_t *img,
257  struct opticflow_result_t *result)
258 {
259  if (opticflow->just_switched_method) {
260  // Create the image buffers
261  image_create(&opticflow->img_gray, img->w, img->h, IMAGE_GRAYSCALE);
262  image_create(&opticflow->prev_img_gray, img->w, img->h, IMAGE_GRAYSCALE);
263 
264  // Set the previous values
265  opticflow->got_first_img = false;
266  FLOAT_RATES_ZERO(opticflow->prev_rates);
267 
268  // Init median filters with zeros
269  init_median_filter_i(&vel_x_filt, MEDIAN_DEFAULT_SIZE);
270  init_median_filter_i(&vel_y_filt, MEDIAN_DEFAULT_SIZE);
271  }
272 
273  // variables for size_divergence:
274  float size_divergence; int n_samples;
275 
276  // variables for linear flow fit:
277  float error_threshold;
278  int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
279  struct linear_flow_fit_info fit_info;
280 
281  // Update FPS for information
282  result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.);
283  opticflow->prev_timestamp = img->ts;
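// Worked example (illustrative numbers): timeval_diff() returns milliseconds, so a frame
// interval of 40 ms gives result->fps = 1 / (40 / 1000.) = 25 frames per second.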
284 
285  // Convert image to grayscale
286  image_to_grayscale(img, &opticflow->img_gray);
287 
288  // Copy to previous image if not set
289  if (!opticflow->got_first_img) {
290  image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
291  opticflow->got_first_img = true;
292  }
293 
294  // *************************************************************************************
295  // Corner detection
296  // *************************************************************************************
297 
298  // if feature_management is selected and tracked corners drop below a threshold, redetect
299  if ((opticflow->feature_management) && (result->corner_cnt < opticflow->max_track_corners / 2)) {
300  // no need for "per region" re-detection when there are no previous corners
301  if ((!opticflow->fast9_region_detect) || (result->corner_cnt == 0)) {
302  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
303  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
304  &opticflow->fast9_rsize,
305  &opticflow->fast9_ret_corners,
306  NULL);
307  } else {
308  // Allocate and initialize the 2D array that holds the number of corners per region and the region index (used for sorting)
309  uint16_t **region_count = malloc(opticflow->fast9_num_regions * sizeof(uint16_t *));
310  for (uint16_t i = 0; i < opticflow->fast9_num_regions ; i++) {
311  region_count[i] = malloc(sizeof(uint16_t) * 2);
312  region_count[i][0] = 0;
313  region_count[i][1] = i;
314  }
315  for (uint16_t i = 0; i < result->corner_cnt; i++) {
316  region_count[(opticflow->fast9_ret_corners[i].x / (img->w / (uint8_t)sqrt(opticflow->fast9_num_regions))
317  + opticflow->fast9_ret_corners[i].y / (img->h / (uint8_t)sqrt(opticflow->fast9_num_regions)) * (uint8_t)sqrt(opticflow->fast9_num_regions))][0]++;
318  }
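// Worked example (illustrative, assuming a hypothetical 240 x 240 image and fast9_num_regions = 9):
// the image is split into a 3 x 3 grid of 80 x 80 px cells, so a corner at (100, 170) falls in
// column 100 / 80 = 1 and row 170 / 80 = 2, i.e. region index 1 + 2 * 3 = 7.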
319 
320  // Sort the region_count array according to its first column (the number of corners).
321  qsort(region_count, opticflow->fast9_num_regions, sizeof(region_count[0]), cmp_array);
322 
323  // Detect corners starting from the region with the fewest corners to the one with the most, until the desired total is reached.
324  for (uint16_t i = 0; i < opticflow->fast9_num_regions && result->corner_cnt < 2 * opticflow->max_track_corners ; i++) {
325 
326  // Find the boundaries of the region of interest
327  uint16_t *roi = malloc(4 * sizeof(uint16_t));
328  roi[0] = (region_count[i][1] % (uint8_t)sqrt(opticflow->fast9_num_regions)) * (img->w / (uint8_t)sqrt(opticflow->fast9_num_regions));
329  roi[1] = (region_count[i][1] / (uint8_t)sqrt(opticflow->fast9_num_regions)) * (img->h / (uint8_t)sqrt(opticflow->fast9_num_regions));
330  roi[2] = roi[0] + (img->w / (uint8_t)sqrt(opticflow->fast9_num_regions));
331  roi[3] = roi[1] + (img->h / (uint8_t)sqrt(opticflow->fast9_num_regions));
332 
333  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
334  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
335  &opticflow->fast9_rsize,
336  &opticflow->fast9_ret_corners,
337  roi);
338  free(roi);
339  }
340  for (uint16_t i = 0; i < opticflow->fast9_num_regions; i++) {
341  free(region_count[i]);
342  }
343  free(region_count);
344  }
345  } else if (!opticflow->feature_management) {
346  // needs to be set to 0 because result is now static
347  result->corner_cnt = 0;
348  // FAST corner detection
349  // TODO: There is something wrong with fast9_detect destabilizing the FPS. This problem is reduced by setting min_distance
350  // to 0 (see defines); however, a more permanent solution should be considered.
351  fast9_detect(&opticflow->prev_img_gray, opticflow->fast9_threshold, opticflow->fast9_min_distance,
352  opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
353  &opticflow->fast9_rsize,
354  &opticflow->fast9_ret_corners,
355  NULL);
356 
357  // Adaptive threshold
358  if (opticflow->fast9_adaptive) {
359  // Decrease and increase the threshold based on previous values
360  if (result->corner_cnt < 40
361  && opticflow->fast9_threshold > FAST9_LOW_THRESHOLD) { // TODO: Replace 40 with OPTICFLOW_MAX_TRACK_CORNERS / 2
362  opticflow->fast9_threshold--;
363  } else if (result->corner_cnt > OPTICFLOW_MAX_TRACK_CORNERS * 2 && opticflow->fast9_threshold < FAST9_HIGH_THRESHOLD) {
364  opticflow->fast9_threshold++;
365  }
366  }
367  }
368 
369 #if OPTICFLOW_SHOW_CORNERS
370  image_show_points(img, opticflow->fast9_ret_corners, result->corner_cnt);
371 #endif
372 
373  // Check if we found some corners to track
374  if (result->corner_cnt < 1) {
375  // Clear the result otherwise the previous values will be returned for this frame too
376  memset(result, 0, sizeof(struct opticflow_result_t));
377  image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
378  return;
379  }
380 
381  // *************************************************************************************
382  // Corner Tracking
383  // *************************************************************************************
384 
385  // Execute a Lucas Kanade optical flow
386  result->tracked_cnt = result->corner_cnt;
387  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, opticflow->fast9_ret_corners,
388  &result->tracked_cnt,
389  opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
390  opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level);
391 
392 #if OPTICFLOW_SHOW_FLOW
393  image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
394 #endif
395 
396  // Estimate size divergence:
397  if (SIZE_DIV) {
398  n_samples = 100;
399  size_divergence = get_size_divergence(vectors, result->tracked_cnt, n_samples);
400  result->div_size = size_divergence;
401  } else {
402  result->div_size = 0.0f;
403  }
404  if (LINEAR_FIT) {
405  // Linear flow fit (normally derotation should be performed before):
406  error_threshold = 10.0f;
407  n_iterations_RANSAC = 20;
408  n_samples_RANSAC = 5;
409  success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
410  n_samples_RANSAC, img->w, img->h, &fit_info);
411 
412  if (!success_fit) {
413  fit_info.divergence = 0.0f;
414  fit_info.surface_roughness = 0.0f;
415  }
416 
417  result->divergence = fit_info.divergence;
418  result->surface_roughness = fit_info.surface_roughness;
419  } else {
420  result->divergence = 0.0f;
421  result->surface_roughness = 0.0f;
422  }
423 
424 
425  // Get the median flow
426  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
427  if (result->tracked_cnt == 0) {
428  // We got no flow
429  result->flow_x = 0;
430  result->flow_y = 0;
431  } else if (result->tracked_cnt > 3) {
432  // Take the average of the 3 median points
433  result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
434  result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
435  result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
436  result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
437  result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
438  result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
439  result->flow_x /= 3;
440  result->flow_y /= 3;
441  } else {
442  // Take the median point
443  result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
444  result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
445  }
446 
447  // Flow Derotation
448  float diff_flow_x = 0;
449  float diff_flow_y = 0;
450 
451  /*// Flow Derotation TODO:
452  float diff_flow_x = (cam_state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
453  float diff_flow_y = (cam_state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;*/
454 
455  if (opticflow->derotation && result->tracked_cnt > 5) {
456  diff_flow_x = (cam_state->rates.p) / result->fps * img->w /
457  OPTICFLOW_FOV_W;// * img->w / OPTICFLOW_FOV_W;
458  diff_flow_y = (cam_state->rates.q) / result->fps * img->h /
459  OPTICFLOW_FOV_H;// * img->h / OPTICFLOW_FOV_H;
460  }
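// Worked example (illustrative, assuming a hypothetical 320 px wide image): a roll rate of
// p = 0.5 rad/s at 25 fps rotates the camera by 0.02 rad between frames, which corresponds to
// diff_flow_x = 0.02 * 320 / 0.894 ~= 7.2 px of apparent flow that is subtracted below.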
461 
462  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor *
463                       opticflow->derotation_correction_factor_x;
464  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor *
465                       opticflow->derotation_correction_factor_y;
466  opticflow->prev_rates = cam_state->rates;
467 
468  // Velocity calculation
469  // Right now this formula is under the assumption that the flow only exists along the center axis of the camera.
470  // TODO: Calculate the velocity in a more sophisticated way, taking into account the drone's angle and the slope of the ground plane.
471  float vel_x = result->flow_der_x * result->fps * cam_state->agl / opticflow->subpixel_factor / OPTICFLOW_FX;
472  float vel_y = result->flow_der_y * result->fps * cam_state->agl / opticflow->subpixel_factor / OPTICFLOW_FY;
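// Worked example (illustrative numbers): with flow_der_x = 150 subpixels, fps = 30,
// agl = 1.0 m, subpixel_factor = 10 and OPTICFLOW_FX ~= 343, the estimate is
// vel_x = 150 * 30 * 1.0 / 10 / 343 ~= 1.31 m/s.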
473 
474  //Apply a median filter to the velocity if wanted
475  if (opticflow->median_filter == true) {
476  result->vel_x = (float)update_median_filter_i(&vel_x_filt, (int32_t)(vel_x * 1000)) / 1000;
477  result->vel_y = (float)update_median_filter_i(&vel_y_filt, (int32_t)(vel_y * 1000)) / 1000;
478  } else {
479  result->vel_x = vel_x;
480  result->vel_y = vel_y;
481  }
482  // Velocity calculation: uncomment if the focal length of the camera is unknown or incorrect.
483  // result->vel_x = - result->flow_der_x * result->fps * cam_state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_W / img->w
484  // result->vel_y = result->flow_der_y * result->fps * cam_state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_H / img->h
485 
486 
487  // Determine quality of noise measurement for state filter
488  // TODO: develop a noise model based on ground truth
489 
490  float noise_measurement_temp = (1 - ((float)result->tracked_cnt / ((float)opticflow->max_track_corners * 1.25)));
491  result->noise_measurement = noise_measurement_temp;
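// Worked example (illustrative numbers): with max_track_corners = 25 the denominator is
// 25 * 1.25 = 31.25, so tracking 20 corners gives a noise estimate of roughly
// 1 - 20 / 31.25 ~= 0.36, while tracking all 25 corners gives 1 - 0.8 = 0.2.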
492 
493  // *************************************************************************************
494  // Next Loop Preparation
495  // *************************************************************************************
496 
497  if (opticflow->feature_management) {
498  result->corner_cnt = result->tracked_cnt;
499  //get the new positions of the corners and the "residual" subpixel positions
500  for (uint16_t i = 0; i < result->tracked_cnt; i++) {
501  opticflow->fast9_ret_corners[i].x = (uint32_t)((vectors[i].pos.x + (float)vectors[i].flow_x) / opticflow->subpixel_factor);
502  opticflow->fast9_ret_corners[i].y = (uint32_t)((vectors[i].pos.y + (float)vectors[i].flow_y) / opticflow->subpixel_factor);
503  opticflow->fast9_ret_corners[i].x_sub = (uint16_t)((vectors[i].pos.x + vectors[i].flow_x) % opticflow->subpixel_factor);
504  opticflow->fast9_ret_corners[i].y_sub = (uint16_t)((vectors[i].pos.y + vectors[i].flow_y) % opticflow->subpixel_factor);
505  opticflow->fast9_ret_corners[i].count = vectors[i].pos.count;
506  }
507  }
508  free(vectors);
509  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
510 }
511 
519 void calc_edgeflow_tot(struct opticflow_t *opticflow, struct opticflow_state_t *cam_state, struct image_t *img,
520  struct opticflow_result_t *result)
521 {
522  // Define Static Variables
523  static struct edge_hist_t edge_hist[MAX_HORIZON];
524  static uint8_t current_frame_nr = 0;
525  struct edge_flow_t edgeflow;
526  static uint8_t previous_frame_offset[2] = {1, 1};
527 
528  // Define Normal variables
529  struct edgeflow_displacement_t displacement;
530  displacement.x = malloc(sizeof(int32_t) * img->w);
531  displacement.y = malloc(sizeof(int32_t) * img->h);
532 
533  // If the method has just switched to this one, reinitialize the
534  // array of edge_hist structures.
535  if (opticflow->just_switched_method == 1) {
536  int i;
537  for (i = 0; i < MAX_HORIZON; i++) {
538  edge_hist[i].x = malloc(sizeof(int32_t) * img->w);
539  edge_hist[i].y = malloc(sizeof(int32_t) * img->h);
540  FLOAT_RATES_ZERO(edge_hist[i].rates);
541  }
542  }
543 
544  uint16_t disp_range;
545  if (opticflow->search_distance < DISP_RANGE_MAX) {
546  disp_range = opticflow->search_distance;
547  } else {
548  disp_range = DISP_RANGE_MAX;
549  }
550 
551  uint16_t window_size;
552 
553  if (opticflow->window_size < MAX_WINDOW_SIZE) {
554  window_size = opticflow->window_size;
555  } else {
556  window_size = MAX_WINDOW_SIZE;
557  }
558 
559  uint16_t RES = opticflow->resolution_factor;
560 
561  //......................Calculating EdgeFlow..................... //
562 
563  // Calculate current frame's edge histogram
564  int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
565  int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
566  calculate_edge_histogram(img, edge_hist_x, 'x', 0);
567  calculate_edge_histogram(img, edge_hist_y, 'y', 0);
568 
569 
570  // Copy the frame time and body rates of the image to the calculated edge histogram
571  edge_hist[current_frame_nr].frame_time = img->ts;
572  edge_hist[current_frame_nr].rates = cam_state->rates;
573 
574  // Calculate which previous edge_hist to compare with the current
575  uint8_t previous_frame_nr[2];
576  calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);
577 
578  //Select edge histogram from the previous frame nr
579  int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
580  int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;
581 
582  //Calculate the corresponding derotation of the two frames
583  int16_t der_shift_x = 0;
584  int16_t der_shift_y = 0;
585 
586  if (opticflow->derotation) {
587  der_shift_x = (int16_t)(edge_hist[current_frame_nr].rates.p /
588  result->fps *
589  (float)img->w / (OPTICFLOW_FOV_W) * opticflow->derotation_correction_factor_x);
590  der_shift_y = (int16_t)(edge_hist[current_frame_nr].rates.q /
591  result->fps *
592  (float)img->h / (OPTICFLOW_FOV_H) * opticflow->derotation_correction_factor_y);
593  }
594 
595  // Estimate pixel wise displacement of the edge histograms for x and y direction
596  calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
597  displacement.x, img->w,
598  window_size, disp_range, der_shift_x);
599  calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
600  displacement.y, img->h,
601  window_size, disp_range, der_shift_y);
602 
603  // Fit a line on the pixel displacement to estimate
604  // the global pixel flow and divergence (RES is resolution)
605  line_fit(displacement.x, &edgeflow.div_x,
606  &edgeflow.flow_x, img->w,
607  window_size + disp_range, RES);
608  line_fit(displacement.y, &edgeflow.div_y,
609  &edgeflow.flow_y, img->h,
610  window_size + disp_range, RES);
611 
612  /* Save the resulting flow in the results
613  * Warning: The flow detected here is different in sign
614  * and size, therefore it is divided by
615  * the same subpixel factor and multiplied by -1 to make it on par with
616  * the LK algorithm of opticflow_calculator.c
617  * */
618  edgeflow.flow_x = -1 * edgeflow.flow_x;
619  edgeflow.flow_y = -1 * edgeflow.flow_y;
620 
621  edgeflow.flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
622  edgeflow.flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];
623 
624  result->flow_x = (int16_t)edgeflow.flow_x / RES;
625  result->flow_y = (int16_t)edgeflow.flow_y / RES;
626 
627  // Fill up the optic flow results to be on par with LK_fast9
628  result->flow_der_x = result->flow_x;
629  result->flow_der_y = result->flow_y;
630  result->corner_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
631  result->tracked_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
632  result->divergence = (float)edgeflow.div_x / RES;
633  result->div_size = 0.0f;
634  result->noise_measurement = 0.0f;
635  result->surface_roughness = 0.0f;
636 
637  //......................Calculating VELOCITY ..................... //
638 
639  /* Estimate the fps per direction
640  * This is the fps with an adaptive horizon for subpixel flow, which is not the same
641  * as the loop rate of the algorithm. The faster the quadcopter flies,
642  * the higher it becomes.
643  */
644  float fps_x = 0;
645  float fps_y = 0;
646  float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
647  float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
648  fps_x = 1 / (time_diff_x);
649  fps_y = 1 / (time_diff_y);
650 
651  result->fps = fps_x;
652 
653  // Calculate velocity
654  float vel_x = edgeflow.flow_x * fps_x * cam_state->agl * OPTICFLOW_FOV_W / (img->w * RES);
655  float vel_y = edgeflow.flow_y * fps_y * cam_state->agl * OPTICFLOW_FOV_H / (img->h * RES);
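// Worked example (illustrative, assuming a hypothetical 320 px wide image): with
// edgeflow.flow_x = 300 (RES-scaled units, RES = 100), fps_x = 30, agl = 1.0 m and
// OPTICFLOW_FOV_W ~= 0.894 rad, vel_x = 300 * 30 * 1.0 * 0.894 / (320 * 100) ~= 0.25 m/s.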
656 
657  //Apply a median filter to the velocity if wanted
658  if (opticflow->median_filter == true) {
659  result->vel_x = (float)update_median_filter_i(&vel_x_filt, (int32_t)(vel_x * 1000)) / 1000;
660  result->vel_y = (float)update_median_filter_i(&vel_y_filt, (int32_t)(vel_y * 1000)) / 1000;
661  } else {
662  result->vel_x = vel_x;
663  result->vel_y = vel_y;
664  }
665 
666  result->noise_measurement = 0.2;
667 
668 
669 
670 #if OPTICFLOW_SHOW_FLOW
671  draw_edgeflow_img(img, edgeflow, prev_edge_histogram_x, edge_hist_x);
672 #endif
673  // Increment and wrap the current frame number
674  current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
675 
676  // Free malloc'd variables
677  free(displacement.x);
678  free(displacement.y);
679 }
680 
681 
689 void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *cam_state, struct image_t *img,
690  struct opticflow_result_t *result)
691 {
692 
693  // A switch counter that checks in the loop whether the current method is the same
694  // as the previous one (for reinitializing structs)
695  static int8_t switch_counter = -1;
696  if (switch_counter != opticflow->method) {
697  opticflow->just_switched_method = true;
698  switch_counter = opticflow->method;
699  // Clear the static result
700  memset(result, 0, sizeof(struct opticflow_result_t));
701  } else {
702  opticflow->just_switched_method = false;
703  }
704 
705  // Switch between methods (0 = fast9/lukas-kanade, 1 = EdgeFlow)
706  if (opticflow->method == 0) {
707  calc_fast9_lukas_kanade(opticflow, cam_state, img, result);
708  } else if (opticflow->method == 1) {
709  calc_edgeflow_tot(opticflow, cam_state, img, result);
710  }
711 
712  /* Rotate velocities from camera frame coordinates to body coordinates for control
713  * IMPORTANT!!! This camera-frame-to-body orientation holds for the Parrot
714  * ARDrone and Bebop; however, it can be different for other quadcopters.
715  * ALWAYS double check!
716  */
717  result->vel_body_x = result->vel_y;
718  result->vel_body_y = - result->vel_x;
719 
720  // KALMAN filter on velocity
721  float measurement_noise[2] = {result->noise_measurement, 1.0f};
722  static bool reinitialize_kalman = true;
723 
724  static uint8_t wait_filter_counter =
725  0; // When starting up the optical flow module, or switching between methods, wait a bit to prevent bias
726 
727 
728  if (opticflow->kalman_filter == true) {
729  if (opticflow->just_switched_method == true) {
730  wait_filter_counter = 0;
731  reinitialize_kalman = true;
732  }
733 
734  if (wait_filter_counter > 50) {
735 
736  // Get accelerometer values rotated to body axis
737  // TODO: use acceleration from the state ?
738  struct FloatVect3 accel_imu_f;
739  ACCELS_FLOAT_OF_BFP(accel_imu_f, cam_state->accel_imu_meas);
740  struct FloatVect3 accel_meas_body;
741  float_quat_vmult(&accel_meas_body, &cam_state->imu_to_body_quat, &accel_imu_f);
742 
743  float acceleration_measurement[2];
744  acceleration_measurement[0] = accel_meas_body.x;
745  acceleration_measurement[1] = accel_meas_body.y;
746 
747  kalman_filter_opticflow_velocity(&result->vel_body_x, &result->vel_body_y, acceleration_measurement, result->fps,
748  measurement_noise, opticflow->kalman_filter_process_noise, reinitialize_kalman);
749  if (reinitialize_kalman) {
750  reinitialize_kalman = false;
751  }
752 
753  } else {
754  wait_filter_counter++;
755  }
756  } else {
757  reinitialize_kalman = true;
758  }
759 
760 }
761 
772 void kalman_filter_opticflow_velocity(float *velocity_x, float *velocity_y, float *acceleration_measurement, float fps,
773  float *measurement_noise, float kalman_process_noise, bool reinitialize_kalman)
774 {
775  // Initialize variables
776  static float covariance_x[4], covariance_y[4], state_estimate_x[2], state_estimate_y[2];
777  float measurements_x[2], measurements_y[2];
778 
779  if (reinitialize_kalman) {
780  state_estimate_x[0] = 0.0f;
781  state_estimate_x[1] = 0.0f;
782  covariance_x[0] = 1.0f;
783  covariance_x[1] = 1.0f;
784  covariance_x[2] = 1.0f;
785  covariance_x[3] = 1.0f;
786 
787  state_estimate_y[0] = 0.0f;
788  state_estimate_y[1] = 0.0f;
789  covariance_y[0] = 1.0f;
790  covariance_y[1] = 1.0f;
791  covariance_y[2] = 1.0f;
792  covariance_y[3] = 1.0f;
793  }
794 
795  /*Model for velocity estimation
796  * state = [ velocity; acceleration]
797  * Velocity_prediction = last_velocity_estimate + acceleration * dt
798  * Acceleration_prediction = last_acceleration
799  * model = Jacobian([vel_prediction; accel_prediction],state)
800  * = [1 dt ; 0 1];
801  * */
802  float model[4] = {1.0f, 1.0f / fps , 0.0f , 1.0f};
803  float process_noise[2] = {kalman_process_noise, kalman_process_noise};
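/* For reference (editorial sketch, not the verified implementation of
 * kalman_filter_linear_2D_float): a linear Kalman filter with this model and a direct
 * measurement of both states (H = I) would predict and update as
 *   x_pred = A * x,                 P_pred = A * P * A' + Q
 *   K = P_pred * (P_pred + R)^-1
 *   x = x_pred + K * (z - x_pred),  P = (I - K) * P_pred
 * with A = model, Q = diag(process_noise) and R = diag(measurement_noise).
 */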
804 
805  // Measurements from velocity_x of optical flow and acceleration directly from scaled accelerometers
806  measurements_x[0] = *velocity_x;
807  measurements_x[1] = acceleration_measurement[0];
808 
809  measurements_y[0] = *velocity_y;
810  measurements_y[1] = acceleration_measurement[1];
811 
812  // 2D linear kalman filter
813  kalman_filter_linear_2D_float(model, measurements_x, covariance_x, state_estimate_x, process_noise, measurement_noise);
814  kalman_filter_linear_2D_float(model, measurements_y, covariance_y, state_estimate_y, process_noise, measurement_noise);
815 
816  *velocity_x = state_estimate_x[0];
817  *velocity_y = state_estimate_y[0];
818 }
819 
826 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
827 {
828  uint32_t msec;
829  msec = (finishtime->tv_sec - starttime->tv_sec) * 1000;
830  msec += (finishtime->tv_usec - starttime->tv_usec) / 1000;
831  return msec;
832 }
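// Worked example (illustrative numbers): start = {10 s, 900000 us} and finish = {11 s, 25000 us}
// give msec = 1 * 1000 + (25000 - 900000) / 1000 = 1000 - 875 = 125 ms, so the negative
// microsecond difference correctly compensates across the second boundary.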
833 
841 static int cmp_flow(const void *a, const void *b)
842 {
843  const struct flow_t *a_p = (const struct flow_t *)a;
844  const struct flow_t *b_p = (const struct flow_t *)b;
845  return (a_p->flow_x * a_p->flow_x + a_p->flow_y * a_p->flow_y) - (b_p->flow_x * b_p->flow_x + b_p->flow_y *
846  b_p->flow_y);
847 }
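// Illustrative example: a vector with (flow_x, flow_y) = (1, 1) has squared magnitude 2 and one
// with (3, 4) has 25, so cmp_flow() returns a negative value for that pair and the qsort() call
// above orders the vectors by increasing flow magnitude before the median is taken.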
848 
856 static int cmp_array(const void *a, const void *b)
857 {
858  const uint16_t *pa = *(const uint16_t **)a;
859  const uint16_t *pb = *(const uint16_t **)b;
860  return pa[0] - pb[0];
861 }
862 