32 #ifndef BLOB_LOCATOR_FPS
33 #define BLOB_LOCATOR_FPS 0
79 #define Img(X,Y)(((uint8_t*)img->buf)[(Y)*img->w*2+(X)*2])
105 int px = coordinate[0] & 0xFFFe;
106 int py = coordinate[1] & 0xFFFe;
110 for (int y = 0; y < img->h - 1; y++) {
112 Img(px + 1, y) = 255;
114 for (int x = 0; x < img->w - 1; x += 2) {
116 Img(x + 1, py) = 255;
121 temp += coordinate[1];
163 int largest_size = 0;
166 for (int i = 0; i < labels_count; i++) {
169 if (labels[i].pixel_cnt > largest_size) {
176 if (largest_id >= 0) {
179 for (int y = 0; y < dst.h; y++) {
180 for (int x = 0; x < dst.w / 2; x++) {
181 if (l[y * dst.w + x] != 0xffff) {
183 if (l[y * dst.w + x] == largest_id) {
186 p[y * dst.w * 2 + x * 4] = c;
187 p[y * dst.w * 2 + x * 4 + 1] = 0x80;
188 p[y * dst.w * 2 + x * 4 + 2] = c;
189 p[y * dst.w * 2 + x * 4 + 3] = 0x80;
198 if ((cgx > 1) && (cgx < (dst.w - 2)) &&
199     (cgy > 1) && (cgy < (dst.h - 2))
201 p[cgy * dst.w * 2 + cgx * 2 - 4] = 0xff;
202 p[cgy * dst.w * 2 + cgx * 2 - 2] = 0x00;
203 p[cgy * dst.w * 2 + cgx * 2] = 0xff;
204 p[cgy * dst.w * 2 + cgx * 2 + 2] = 0x00;
205 p[cgy * dst.w * 2 + cgx * 2 + 4] = 0xff;
206 p[cgy * dst.w * 2 + cgx * 2 + 6] = 0x00;
207 p[(cgy - 1)*dst.w * 2 + cgx * 2] = 0xff;
208 p[(cgy - 1)*dst.w * 2 + cgx * 2 + 2] = 0x00;
209 p[(cgy + 1)*dst.w * 2 + cgx * 2] = 0xff;
210 p[(cgy + 1)*dst.w * 2 + cgx * 2 + 2] = 0x00;
226 #include "generated/flight_plan.h"
297 printf("Found %d %d \n", x, y);
void image_labeling(struct image_t *input, struct image_t *output, struct image_filter_t *filters, uint8_t filters_cnt, struct image_label_t *labels, uint16_t *labels_count)
Parse UYVY images and make a list of blobs of connected pixels.
uint32_t pixel_cnt
Number of pixels in the blob.
uint8_t filter
Which filter triggered this blob.
uint8_t y_min
YUV color filter.
uint32_t x_sum
Sum of all x coordinates (used to find center of gravity)
struct video_listener * cv_add_to_device(struct video_config_t *device, cv_function func, uint16_t fps, uint8_t id)
Computer vision framework for onboard processing.
void cv_blob_locator_init(void)
void cv_blob_locator_start(void)
struct image_t * cv_marker_func(struct image_t *img, uint8_t camera_id)
void cv_blob_locator_event(void)
void cv_blob_locator_periodic(void)
volatile uint32_t blob_locator
volatile bool window_enabled
void start_vision_land(void)
struct image_t * cv_window_func(struct image_t *img, uint8_t camera_id)
#define BLOB_LOCATOR_FPS
Default FPS (zero means run at camera fps)
struct image_t * cv_blob_locator_func(struct image_t *img, uint8_t camera_id)
volatile bool blob_enabled
uint8_t cv_blob_locator_type
uint8_t cv_blob_locator_reset
void cv_blob_locator_stop(void)
volatile bool marker_enabled
void georeference_project(struct camera_frame_t *tar, int wp)
void georeference_init(void)
void georeference_filter(bool kalman, int wp, int length)
int32_t px
Target pixel coordinate (left = 0)
int32_t h
Frame height [px].
int32_t py
Target pixel coordinate (top = 0)
int32_t w
Frame width [px].
int32_t f
Camera Focal length in [px].
uint16_t detect_window_sizes(uint8_t *in, uint32_t image_width, uint32_t image_height, uint16_t *coordinate, uint32_t *integral_image, uint8_t MODE)
Detect a bright region surrounded by dark or vice versa - sometimes this corresponds to a window.
void image_to_grayscale(struct image_t *input, struct image_t *output)
Convert an image to grayscale.
void image_free(struct image_t *img)
Free the image.
void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type)
Create a new image.
void * buf
Image buffer (depending on the image_type)
@ IMAGE_GRAYSCALE
Grayscale image with only the Y part (uint8 per pixel)
@ IMAGE_GRADIENT
An image gradient (int16 per pixel)
struct marker_deviation_t marker(struct image_t *input, uint8_t M)
unsigned short uint16_t
Typedef defining 16 bit unsigned short type.
unsigned int uint32_t
Typedef defining 32 bit unsigned int type.
unsigned char uint8_t
Typedef defining 8 bit unsigned char type.