Initial vision pipeline from Ben: camera capture, JPEG decode, OpenCV target detection, and a binary result server.
diff --git a/vision/BinaryServer.cpp b/vision/BinaryServer.cpp
new file mode 100644
index 0000000..094b719
--- /dev/null
+++ b/vision/BinaryServer.cpp
@@ -0,0 +1,130 @@
+#include "vision/BinaryServer.h"
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <string.h>
+#include <vector>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include "aos/externals/libjpeg/include/jpeglib.h"
+
+#include "aos/atom_code/camera/Buffers.h"
+#include "aos/common/time.h"
+#include "opencv2/opencv.hpp"
+
+
+
+
+namespace frc971 {
+namespace vision {
+
+
+
+// Debug read callback: log whatever the client sent, then echo it back.
+static void echo_read_cb(struct bufferevent *bev, void * /*ctx*/){
+  struct evbuffer *input = bufferevent_get_input(bev);
+  struct evbuffer *output = bufferevent_get_output(bev);
+
+  size_t len = evbuffer_get_length(input);
+  char *data = (char *)malloc(len + 1);  // +1 so the data can be NUL-terminated
+  evbuffer_copyout(input, data, len);
+  data[len] = '\0';  // was printed unterminated before (heap over-read via %s)
+  printf("we got some data: %s\n", data);
+  free(data);  // was leaked on every read before
+  evbuffer_add_buffer(output, input);
+}
+
+void BinaryServer::ErrorEvent(struct bufferevent *bev, short events){  // libevent event callback for the client connection
+  if (events & BEV_EVENT_ERROR)
+    perror("Error from bufferevent");
+  if (events & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) {
+    have_id = false;  // frees the single client slot so the next Accept() can take it
+    bufferevent_free(bev);  // NOTE(review): _bufev/_output still point at the freed bev until the next Accept() -- confirm Notify() cannot fire in between
+  }
+}
+
+// Accept callback: adopt the connection if no client is active, otherwise
+// reject it. Only a single client is supported at a time.
+void BinaryServer::Accept(struct evconnlistener *listener,
+    evutil_socket_t fd, struct sockaddr * /*address*/, int /*socklen*/){
+  struct event_base *base = evconnlistener_get_base(listener);
+  if(!have_id){
+    struct bufferevent *bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
+    _output = bufferevent_get_output(bev);
+    _bufev = bev;
+    have_id = true;
+    int no_delay_flag = 1;  // latency-sensitive small packets: disable Nagle
+    setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &no_delay_flag, sizeof(no_delay_flag));
+    bufferevent_setcb(bev, echo_read_cb, NULL, StaticErrorEvent, this);
+    bufferevent_enable(bev, EV_READ | EV_WRITE);
+  } else {
+    evutil_closesocket(fd);  // single-client server: extra connections previously leaked their fd
+  }
+}
+static void accept_error_cb(struct evconnlistener *listener, void * /*ctx*/)  // called when accept() itself fails
+{
+  struct event_base *base = evconnlistener_get_base(listener);
+  int err = EVUTIL_SOCKET_ERROR();
+  fprintf(stderr, "Got an error %d (%s) on the listener. "
+      "Shutting down.\n", err, evutil_socket_error_to_string(err));
+
+  event_base_loopexit(base, NULL);  // stop the event loop; there is no recovery path
+}
+
+// Bind and start listening on all interfaces at the given TCP port.
+void BinaryServer::StartServer(uint16_t port){
+  // evconnlistener_new_bind() creates its own socket; the old socket() call here leaked an fd.
+  struct sockaddr_in sin;
+  memset(&sin, 0, sizeof(sin));
+  sin.sin_family = AF_INET;
+  sin.sin_port = htons( port );
+  sin.sin_addr.s_addr = htonl(INADDR_ANY);  // listen on all interfaces
+
+  listener = evconnlistener_new_bind(_base, StaticAccept, this,
+      LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1,
+      (struct sockaddr *) (void *)&sin, sizeof(sin));
+
+  if (!listener) {
+    fprintf(stderr,"%s:%d: Couldn't create listener\n", __FILE__, __LINE__);
+    exit(-1);
+  }
+
+  evconnlistener_set_error_cb(listener, accept_error_cb);
+}
+
+void BinaryServer::Notify(int fd,short /*what*/){  // fired when the sender process signals a finished frame
+  char notes[4096];
+  int count = read(fd,notes,4096);  // drain the wakeup bytes; NOTE(review): count < 0 (read error/EINTR) is not handled and falls through as success
+  if(count == 0){
+    close(fd);
+    fprintf(stderr,"%s:%d: Error No cheeze from OpenCV task!!!\n",__FILE__,__LINE__);
+    exit(-1);
+  }
+  printf("notified!: %d\n",count);
+  if(have_id){
+    printf("got someone to read my stuff!\n");
+    char *binary_data;
+    size_t len;
+    if(_notify->GetData(&binary_data,&len)){
+      printf("here is for sending\n");
+      evbuffer_add_reference(_output, binary_data, len, PacketNotifier::StaticDataSent,_notify);  // zero-copy: StaticDataSent is the cleanup callback once libevent is done with the memory
+      printf("this is how sending went %d\n",bufferevent_flush(_bufev,EV_WRITE, BEV_FLUSH));
+    }
+  }
+}
+
+// Constructor: starts the TCP server, hooks the PacketNotifier's wakeup fd
+BinaryServer::BinaryServer(uint16_t port,
+        frc971::vision::PacketNotifier *notify) :
+    _base(event_base_new()) {
+  have_id = false;
+  StartServer(port);
+  _notify = notify;
+  frame_notify = event_new(_base, notify->RecieverFD(),
+        EV_READ|EV_PERSIST, StaticNotify, this);
+  event_add(frame_notify,NULL);
+  event_base_dispatch(_base);  // NOTE(review): runs the event loop forever -- the constructor never returns in normal operation
+  }
+
+} // namespace vision
+} // namespace frc971
diff --git a/vision/BinaryServer.h b/vision/BinaryServer.h
new file mode 100644
index 0000000..76fbd9e
--- /dev/null
+++ b/vision/BinaryServer.h
@@ -0,0 +1,54 @@
+#ifndef FRC971_VISION_BINARY_SERVER_H_
+#define FRC971_VISION_BINARY_SERVER_H_
+#include "event2/buffer.h"
+#include "event2/event.h"
+#include "event2/listener.h"
+#include "event2/bufferevent.h"
+#include "aos/common/mutex.h"
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include "PacketNotifier.h"
+
+namespace frc971 {
+namespace vision {
+
+/* Runs the libevent event loop and bridges the sockets provided by the PacketNotifier,
+ * so a secondary process can focus on image processing and simply feed its results to this task.
+ */
+class BinaryServer {
+ public:
+  BinaryServer(uint16_t port,PacketNotifier *notify);  // starts the server and runs the event loop (does not return)
+
+
+  void StartServer(uint16_t port);
+ private:
+  event_base *const _base;  // libevent loop, created in the constructor
+  int _fd;  // NOTE(review): assigned in StartServer but otherwise unused; consider removing
+  PacketNotifier *_notify;  // shared-memory bridge to the OpenCV sender process
+  bool have_id;  // true while the single client connection is active
+  int _client_fd;  // NOTE(review): never assigned
+  struct event *frame_notify;  // read event on the PacketNotifier wakeup fd
+  struct evbuffer *_output;  // output buffer of the connected client; dangles after ErrorEvent frees the bufferevent
+  struct bufferevent *_bufev;  // connected client; dangles after ErrorEvent frees it
+  struct evconnlistener *listener;
+  void Accept(evconnlistener *listener, evutil_socket_t fd,
+      struct sockaddr* /*address*/, int /*socklen*/);
+  void Notify(int /*fd*/, short /*what*/);
+  void ErrorEvent(struct bufferevent *bev,short events);
+  static void StaticAccept(evconnlistener *listener, evutil_socket_t fd,
+      struct sockaddr *address, int socklen,void *self){
+    ((BinaryServer *)(self))->Accept(listener, fd, address, socklen);
+  }
+  static void StaticNotify(int fd, short what, void *self){
+    ((BinaryServer *)(self))->Notify(fd, what);
+  }
+  static void StaticErrorEvent(struct bufferevent *bev,short events,void *self){
+    ((BinaryServer *)(self))->ErrorEvent(bev,events);
+  }
+};
+
+} // namespace vision
+} // namespace frc971
+
+#endif // FRC971_VISION_BINARY_SERVER_H_
diff --git a/vision/CameraProcessor.cpp b/vision/CameraProcessor.cpp
new file mode 100644
index 0000000..41e6a2d
--- /dev/null
+++ b/vision/CameraProcessor.cpp
@@ -0,0 +1,451 @@
+
+#include "CameraProcessor.h"
+
+// create a new target from a processed contour plus its raw (unapproximated) contour
+Target::Target(std::vector<cv::Point> new_contour,
+       std::vector<cv::Point> new_raw_contour,
+       FullRect new_rect, int new_weight, bool _is_90) {
+  this_contour = new_contour;
+  raw_contour = new_raw_contour;
+  rect = new_rect;
+  weight = new_weight;  // NOTE(review): new_weight is int but weight is double -- the caller's histogram distance gets truncated
+  height = getHeight(_is_90);
+}
+
+// calculate the target's height in pixels (the distance conversion below is commented out)
+double Target::getHeight(bool is_90) {
+  // The 780.3296 is at full resolution 640x480, and we need
+  // to scale back to 320x240
+  //static const double cam_l = 780.3296 / 2.0;
+  ////static const double cam_d = 20.78096;
+  double height;
+  if (is_90) {
+    height = ((rect.ul.x - rect.ur.x) +
+              (rect.bl.x - rect.br.x)) / 2.0;  // camera rotated 90: vertical extent lives on the x axis
+  } else {
+    height = ((rect.ur.y + rect.ul.y) -
+              (rect.br.y + rect.bl.y)) / 2.0;  // mean top edge minus mean bottom edge
+  }
+  //return cam_l * 12.0 / height;
+  return height;
+}
+
+void Target::refineTarget() {  // stub: square refinement is not implemented yet
+  printf("okay refined\n");
+}
+
+FullRect::FullRect() {  // -1 marks each corner as not-yet-assigned (see calcFullRect); centroid zero-inits via cv::Point2f's default ctor
+  ur.x = -1;
+  ur.y = -1;
+  ul.x = -1;
+  ul.y = -1;
+  br.x = -1;
+  br.y = -1;
+  bl.x = -1;
+  bl.y = -1;
+}
+
+// turns a 4-point contour into an easier-to-access corner structure (only the first 4 points are used)
+FullRect calcFullRect(std::vector<cv::Point> *contour){
+  FullRect rect;
+  for(int i=0; i<4; i++){  // centroid = mean of the four corners
+    cv::Point2f pt = (*contour)[i];
+    rect.centroid.x += pt.x;
+    rect.centroid.y += pt.y;
+  }
+  rect.centroid.x /= 4;
+  rect.centroid.y /= 4;
+  for(int i=0; i<4; i++){
+    cv::Point2f pt = (*contour)[i];
+    if(pt.y > rect.centroid.y ){  // NOTE(review): in image coords larger y is *lower*, yet this branch assigns ul/ur; the swap below only fixes left/right order -- confirm top/bottom naming is intended
+      if(pt.x > rect.centroid.x){
+        if (rect.ul.x < 0) {  // -1 sentinel: corner not assigned yet
+          rect.ul = pt;
+        } else {
+          rect.ur = pt;
+        }
+      }else{
+        if (rect.ur.x < 0) {
+          rect.ur = pt;
+        } else {
+          rect.ul = pt;
+        }
+      }
+      if (rect.ul.x > 0 && rect.ur.x > 0) {
+        // both are set, so if we got it wrong correct it here
+        if (rect.ul.x > rect.ur.x) {
+          pt = rect.ur;
+          rect.ur = rect.ul;
+          rect.ul = pt;
+        }
+      }
+    }else{
+      if(pt.x > rect.centroid.x){
+        if (rect.bl.x < 0) {
+          rect.bl = pt;
+        } else {
+          rect.br = pt;
+        }
+      }else{
+        if (rect.br.x < 0) {
+          rect.br = pt;
+        } else {
+          rect.bl = pt;
+        }
+      }
+      if (rect.bl.x > 0 && rect.br.x > 0) {
+        // both are set, so if we got it wrong correct it here
+        if (rect.bl.x > rect.br.x) {
+          pt = rect.br;
+          rect.br = rect.bl;
+          rect.bl = pt;
+        }
+      }
+    }
+  }
+  return rect;
+}
+
+// quickly remove targets that do not fit a very broad set of constraints
+bool cullObvious(FullRect rect, double outside_size){
+  // first check that could see a target this size
+  // Calculated from dave's simulation, should be 850 and 72000 if filled
+  if((outside_size < 500) || (outside_size > 90000)){
+    return false;
+  }
+  // Targets on the edge are at best inaccurate.
+  // In this case, we just want to point the right way,
+  // so this is no longer a valid assumption.
+  /*if( rect.ur.x < 2 || rect.ur.y < 2 || rect.ur.x > 637 || rect.ur.y > 477 ||
+      rect.ul.x < 2 || rect.ul.y < 2 || rect.ul.x > 637 || rect.ul.y > 477 ||
+      rect.br.x < 2 || rect.br.y < 2 || rect.br.x > 637 || rect.br.y > 477 ||
+      rect.bl.x < 2 || rect.bl.y < 2 || rect.bl.x > 637 || rect.bl.y > 477){
+    return false;
+  }*/
+  // make sure the sides are close to the right ratio of a rect
+  // some really odd shapes get confusing
+  double ratio = norm(rect.ur-rect.ul)/norm(rect.br-rect.bl);  // top edge vs bottom edge
+  if( ratio < .7 || ratio > 1.4 ) {
+    return false;
+  }
+  ratio = norm(rect.ur-rect.br)/norm(rect.ul-rect.bl);  // right edge vs left edge
+  if( ratio < .7 || ratio > 1.4 ) {
+    return false;
+  }
+
+  return true;
+}
+
+// sum over values between these two points and normalize
+// see Bresenham's Line Algorithm for the logic of moving
+// over all the pixels between these two points.
+double ProcessorData::calcHistComponent(
+    cv::Point2i start,
+    cv::Point2i end,
+    cv::Mat thresh_img){
+  int dx = abs(end.x - start.x);
+  int dy = abs(end.y - start.y);
+  int sx = (start.x < end.x) ? 1 : -1;
+  int sy = (start.y < end.y) ? 1 : -1;
+  int error = dx-dy;
+
+  int total = 0;  // pixels visited
+  int value = 0;  // pixels that are "on" in the thresholded image
+  int total_error;
+#if LOCAL_DEBUG
+  IplImage gd = *global_display;
+#endif
+  IplImage ti = thresh_img;
+  while(1){
+    total++;
+
+    uchar* ptr = (uchar*) (ti.imageData + start.y * ti.widthStep + start.x);  // NOTE(review): endpoints are not clamped to the image -- off-image points read out of bounds
+    if((int) *ptr) value++;
+    // draw line checked
+#if LOCAL_DEBUG
+    uchar* ptr2 = (uchar*) (gd.imageData + start.y * gd.widthStep + start.x*3);
+    *ptr2++ = 0;
+    *ptr2++ = 255;
+    *ptr2 = 0;
+#endif
+    if(start.x == end.x && start.y == end.y) break;
+    total_error = 2 * error;
+    if(total_error > -dy){
+      error -= dy;
+      start.x += sx;
+    }
+    if(total_error < dx){
+      error += dx;
+      start.y += sy;
+    }
+  }
+  return (double)value/(double)total;  // fraction of the line that is lit
+}
+
+// chi-squared distance between two histograms (histA is the expected one)
+double chiSquared(int length, double* histA, double* histB){
+  double sum = 0;
+  for(int i=0; i<length;i++){
+    double diff = *(histB+i) - *(histA+i);
+    sum += (diff * diff) / *(histA+i);  // NOTE(review): divides by zero when histA[i] == 0
+  }
+  return sum;
+}
+// euclidean dist function
+double L2_dist(int length, double* histA, double* histB){
+  double sum = 0;
+  for(int i=0; i<length;i++){
+    double diff = *(histB+i) - *(histA+i);
+    sum += (diff * diff);
+  }
+  return sqrt(sum);
+}
+
+// calc and compare the scan-line histograms of the candidate against the desired ones
+double ProcessorData::checkHistogram(FullRect rect, cv::Mat thresh_img){
+  // found horiz histogram
+  double hist_lr[HIST_SIZE];
+  // step size along left edge
+  cv::Point2f delta_left = (rect.ul - rect.bl)*HIST_SIZE_F;
+  // step size along right edge
+  cv::Point2f delta_right = (rect.ur - rect.br)*HIST_SIZE_F;
+  // sum each left to right line for the histogram
+  for(int i=0; i<HIST_SIZE; i++){
+    hist_lr[i] = calcHistComponent(rect.bl+i*delta_left,
+        rect.br+i*delta_right,thresh_img);
+  }
+  double check_vert = L2_dist(HIST_SIZE, vert_hist, hist_lr);
+  // found vert histogram
+  double hist_ub[HIST_SIZE];
+  // step size along bottom edge
+  cv::Point2f delta_bottom = (rect.bl - rect.br)*HIST_SIZE_F;
+  // step size along top edge
+  cv::Point2f delta_top = (rect.ul - rect.ur)*HIST_SIZE_F;
+  // sum each top to bottom line for the histogram
+  for(int i=0; i<HIST_SIZE; i++){
+    hist_ub[i] = calcHistComponent(rect.ur+i*delta_top,
+        rect.br+i*delta_bottom,thresh_img);
+  }
+  double check_horz = L2_dist(HIST_SIZE, horz_hist, hist_ub);
+
+  // average the two distances; smaller means a better match (see HIST_MATCH)
+  double check = (check_vert + check_horz)/2.0;
+  return check;
+}
+
+// return smallest of the three arguments
+template<class T> inline T Min3(T x, T y, T z) {
+  return y <= z ? (x <= y ? x : y)
+                : (x <= z ? x : z);
+}
+
+// return largest of the three arguments
+template<class T> inline T Max3(T x, T y, T z) {
+  return y >= z ? (x >= y ? x : y)
+                : (x >= z ? x : z);
+}
+
+// replaces the contour with its convex hull (in place)
+void makeConvex(std::vector<cv::Point> *contour){
+  std::vector<cv::Point2i> hull;
+  convexHull(*contour, hull, false);
+  *contour = hull;
+}
+
+// basic init: HSV thresholds, working images, and the desired scan-line histograms
+ProcessorData::ProcessorData(int width, int height, bool is_90_) {
+  is_90 = is_90_;
+  // series b images from nasa
+  h1=79; s1=53; v1=82;
+  h2=200; s2=255; v2=255;
+  // For images from Jerry
+  //h1=79; s1=0; v1=11;
+  //h2=160; s2=255; v2=255;
+  img_width = width;
+  img_height = height;
+  buffer_size = img_height * img_width * 3;
+#if LOCAL_DEBUG
+  global_display = cvCreateImage(cvSize(width, height),
+      IPL_DEPTH_8U, 3);
+#endif
+  grey_image = cvCreateImage(cvSize(width, height),
+      IPL_DEPTH_8U, 1);
+  grey_mat = new cv::Mat(grey_image);
+  // NOTE(review): src_header_image is never allocated here but is released in ~ProcessorData and passed to cvSetData by callers -- confirm it is set somewhere
+  // calculate a desired histogram before we start
+  int j = 0;
+  for(double i=0; j<HIST_SIZE; i+=HIST_SIZE_F){
+    if (is_90) {
+      if(i < 2.0/12.0 || i > (1.0-2.0/12.0) ) horz_hist[j] = 1;
+      else horz_hist[j] = 0.10;
+      if(i < 2.0/24.0 || i > (1.0-2.0/24.0) ) vert_hist[j] = 1;
+      else vert_hist[j] = 4.0/24.0;
+    } else {
+      if(i < 2.0/12.0 || i > (1.0-2.0/12.0) ) vert_hist[j] = 1;
+      else vert_hist[j] = 0.10;
+      if(i < 2.0/24.0 || i > (1.0-2.0/24.0) ) horz_hist[j] = 1;
+      else horz_hist[j] = 4.0/24.0;
+    }
+    j++;
+  }
+}
+
+// release the working images and matrices
+ProcessorData::~ProcessorData() {
+  cvReleaseImage(&grey_image);
+  cvReleaseImage(&src_header_image);  // NOTE(review): src_header_image is never allocated in the constructor -- this releases an uninitialized pointer unless a caller set it first
+  delete(grey_mat);
+}
+
+// reset per-frame state; images and mats stay allocated for reuse on the next frame.
+void ProcessorData::clear() {
+  target_list.clear();
+  contour_pairs.clear();
+  hierarchy.clear();
+}
+
+
+// r,g,b values are from 0 to 255
+// h = [0,255], s = [0,255], v = [0,255]
+// if s == 0, then h = 0 (undefined)
+void ProcessorData::RGBtoHSV(uchar r, uchar g, uchar b,
+             uchar *h, uchar *s, uchar *v ) {
+  uchar min, max, delta;
+  min = Min3( r, g, b );
+  max = Max3( r, g, b );
+  *v = max;
+  delta = max - min;
+  if (max == 0 || delta == 0) {  // black or pure grey: hue/saturation undefined
+    *s = 0;
+    *h = 0;
+    return;
+  }
+  *s = (255 * long(delta))/max;
+  if (max == r) {
+    *h = 0 + 43*(g - b)/(delta);  // NOTE(review): negative results wrap mod 256 on assignment to uchar -- appears to rely on that for hue wraparound; confirm
+  } else if (max == g) {
+    *h = 85 + 43*(b - r)/(delta);
+  } else {
+    *h = 171 + 43*(r - g)/(delta);
+  }
+}
+
+// keep anything that is in our HSV wedge
+// by far the longest running function in this code
+void ProcessorData::threshold(uchar* buffer) {
+#if LOCAL_DEBUG
+  uchar * draw_buffer = (uchar *) global_display->imageData;
+#endif
+  for (int i = 0,j = 0;
+       i < (buffer_size);
+       i+= 3,j++) {
+    uchar r = buffer[i + 2];  // pixel layout in buffer is B,G,R
+    uchar g = buffer[i + 1];
+    uchar b = buffer[i + 0];
+    uchar h, s, v;
+
+    RGBtoHSV(r, g, b, &h, &s, &v);
+
+    /* if (h == 0 && s == 0 && v >= v1 && v <= v2) {
+#if LOCAL_DEBUG
+      draw_buffer[i + 0] = 200;
+      draw_buffer[i + 1] = 50;
+      draw_buffer[i + 2] = 100;
+#endif
+      grey_image->imageData[j] = 255;
+    } else */if(h >= h1 && h <= h2 && v >= v1 && v <= v2 && s >= s1 && s <= s2){
+#if LOCAL_DEBUG
+/*      draw_buffer[i + 0] = 255;
+      draw_buffer[i + 1] = 0;
+      draw_buffer[i + 2] = 0;*/
+#endif
+      grey_image->imageData[j] = 255;  // in the HSV wedge: mark white
+    }else{
+#if LOCAL_DEBUG
+/*      draw_buffer[i + 0] = buffer[i + 0];
+      draw_buffer[i + 1] = buffer[i + 1];
+      draw_buffer[i + 2] = buffer[i + 2];*/
+#endif
+      grey_image->imageData[j] = 0;
+    }
+  }
+}
+
+// run find contours and try to make them squares
+void ProcessorData::getContours() {
+  std::vector<std::vector<cv::Point> > raw_contours;
+  //cv::findContours(*grey_mat, raw_contours, hierarchy, CV_RETR_LIST,
+  //    CV_CHAIN_APPROX_SIMPLE);
+  cv::findContours(*grey_mat, raw_contours, hierarchy, CV_RETR_EXTERNAL,
+      CV_CHAIN_APPROX_SIMPLE);  // NOTE(review): findContours may clobber *grey_mat, which checkHistogram later reads -- confirm ordering is intended
+#if LOCAL_DEBUG
+  cv::Mat global_mat(global_display);
+  cv::Scalar color(255,0,0);
+  drawContours(global_mat,raw_contours,-1,color,1);
+
+  std::vector<std::vector<cv::Point> > p_contours;
+#endif
+  if (!raw_contours.empty()) {
+    std::vector<std::vector<cv::Point2i> >::iterator contour_it;
+    for (contour_it=raw_contours.begin();
+        contour_it != raw_contours.end();
+        contour_it++) {
+      // make the contours convex
+      makeConvex(&(*contour_it));
+      std::vector<cv::Point> contour;
+      // then simplify them toward a quadrilateral (epsilon = 9 px)
+      approxPolyDP(*contour_it, contour, 9, true);
+      // stick the raw and processed contours together
+      std::pair<std::vector<cv::Point>,
+          std::vector<cv::Point> > a_pair(
+              *contour_it, contour);
+#if LOCAL_DEBUG
+      p_contours.push_back(contour);
+#endif
+      // and put them in a list
+      contour_pairs.push_back(a_pair);
+    }
+  }
+#if LOCAL_DEBUG
+  cv::Scalar color2(0,0,255);
+  drawContours(global_mat,p_contours,-1,color2,CV_FILLED);
+#endif
+}
+
+// filter the contours down to a list of targets
+void ProcessorData::filterToTargets() {
+  std::vector<std::pair<
+      std::vector<cv::Point>,
+      std::vector<cv::Point> > >::iterator contour_it;
+  for(contour_it=contour_pairs.begin();
+      contour_it != contour_pairs.end();
+      contour_it++){
+    double check = 0;
+    std::vector<cv::Point> raw_contour = std::get<0>(*contour_it);
+    std::vector<cv::Point> contour = std::get<1>(*contour_it);
+    FullRect rect = calcFullRect(&contour);
+    if(contour.size() == 4 &&  // must have simplified to a quadrilateral
+        cullObvious(rect, contourArea(contour)) &&
+        (check = checkHistogram(rect, *grey_mat)) <= HIST_MATCH){
+      // now we have a target, try to improve the square
+#if LOCAL_DEBUG
+  /*    printf("________\n");
+      printf("\tcont= %d raw= %d\n",
+          (int)contour.size(), (int)raw_contour.size());
+      std::vector<cv::Point2i>::iterator point_it;
+      for(point_it=raw_contour.begin();
+          point_it != raw_contour.end(); point_it++){
+        printf("(%d,%d)", point_it->x, point_it->y);
+      }
+      printf("\n");*/
+#endif
+      target_list.push_back(Target(contour,
+          raw_contour, rect, check, is_90));  // NOTE(review): check (double) narrows to Target's int new_weight parameter
+    }
+    //if(contour.size() == 4 &&
+    //    cullObvious(rect, contourArea(contour)) ) {
+    //  printf("check= %.2f\n", check);
+    //}
+  }
+}
+
diff --git a/vision/CameraProcessor.h b/vision/CameraProcessor.h
new file mode 100644
index 0000000..c485bda
--- /dev/null
+++ b/vision/CameraProcessor.h
@@ -0,0 +1,89 @@
+
+#ifndef _CAMERA_PROCESSOR_H_
+#define _CAMERA_PROCESSOR_H_
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <utility>
+#include <vector>
+
+#include "opencv2/imgproc/imgproc.hpp"
+
+// an over described geometric representation of a rectangle
+class FullRect {
+ public:
+  FullRect();  // initializes all four corners to the sentinel (-1,-1)
+  cv::Point2f ur; // upper right
+  cv::Point2f ul; // upper left
+  cv::Point2f br; // bottom right
+  cv::Point2f bl; // bottom left
+  cv::Point2f centroid; // average of the four corners
+};
+
+// All data needed once a target is found
+class Target {
+ public:
+  Target(std::vector<cv::Point> new_contour,
+      std::vector<cv::Point> new_raw_contour,
+      FullRect new_rect, int new_weight, bool _is_90);
+  void refineTarget();  // stub; not implemented yet
+  double getHeight(bool is_90);
+  FullRect rect; // geometric representation of the target
+  std::vector<cv::Point> this_contour; // simplified (approxPolyDP) contour of target
+  std::vector<cv::Point> raw_contour; // original unapproximated contour of target
+  double height; // target height in pixels (see getHeight)
+  double weight; // confidence in this target -- NOTE(review): set from an int ctor parameter, so fractional values are truncated
+};
+
+// main class for processing image data. All relevant data should be
+// accessible through this structure.
+class ProcessorData {
+ public:
+  ProcessorData(int width, int height, bool is_90_);
+  ~ProcessorData();
+  void RGBtoHSV(uchar r, uchar g, uchar b,
+      uchar *h, uchar *s, uchar *v);
+  void threshold(uchar* buffer);
+  void getContours();
+  void filterToTargets();
+  void clear();
+  //protected:
+  int img_width; // all images should be this width
+  int img_height; // and this height
+  bool is_90;  // true when the camera is mounted rotated 90 degrees
+  int buffer_size; // width * height * 3
+  IplImage *grey_image; // thresholded image
+  cv::Mat *grey_mat; // Matrix representation (just a header)
+  std::vector<std::pair<std::vector<cv::Point>,
+      std::vector<cv::Point> > > contour_pairs;  // (raw, simplified) contour pairs
+  //std::vector<std::vector<cv::Point> > contours; // filtered contours
+  //std::vector<std::vector<cv::Point> > raw_contours; //original contours
+  std::vector<cv::Vec4i> hierarchy; // ordering on contours
+  cv::MemStorage g_storage; // opencv storage -- NOTE(review): appears unused
+  static const int HIST_SIZE = 20; // dimension of histogram
+                                   // ie number of scan lines
+  static constexpr double HIST_SIZE_F = 1.0/20.0; // step size
+                                   // should be 1/HIST_SIZE
+  double vert_hist[HIST_SIZE]; // desired vertical histogram
+  double horz_hist[HIST_SIZE]; // desired horizontal histogram
+  // defines the minimum dist for a match
+  static constexpr double HIST_MATCH = 1.76;
+  double calcHistComponent(
+      cv::Point2i start,
+      cv::Point2i end,
+      cv::Mat thresh_img);
+  double checkHistogram(
+      FullRect rect,
+      cv::Mat thresh_img);
+ public:
+  int h1, s1, v1, h2, s2, v2; // HSV min and max
+                              // must be public for tuning
+  IplImage * global_display;
+
+  IplImage *src_header_image; // header for main image -- NOTE(review): never allocated by the constructor yet released by the destructor; must be set before use
+  std::vector<Target> target_list; // list of found targets
+};
+
+#endif //_CAMERA_PROCESSOR_H_
+
diff --git a/vision/GoalMaster.cpp b/vision/GoalMaster.cpp
new file mode 100644
index 0000000..9557072
--- /dev/null
+++ b/vision/GoalMaster.cpp
@@ -0,0 +1,34 @@
+#include "aos/aos_core.h"
+#include "math.h"
+
+#include "frc971/queues/GyroAngle.q.h"
+#include "frc971/queues/CameraTarget.q.h"
+#include "aos/common/time.h"
+#include "vision/RingBuffer.h"
+
+using frc971::vision::RingBuffer;
+using frc971::sensors::gyro;
+using frc971::vision::targets;
+using frc971::vision::target_angle;
+
+int main(){  // fuses gyro history with camera targets to publish an absolute goal angle
+  RingBuffer< ::aos::time::Time,double> buff;
+  ::aos::Init();
+  while (true) {
+    gyro.FetchNextBlocking();
+    buff.Sample(gyro->sent_time, gyro->angle);  // record gyro history so camera frames can be matched by timestamp
+    if(targets.FetchNext()){
+      const frc971::vision::CameraTarget *goal = targets.get();
+      ::aos::time::Time stamp = ::aos::time::Time::InNS(goal->timestamp);
+      double angle_goal =
+          buff.ValueAt(stamp) - M_PI * goal->percent_azimuth_off_center / 4;  // NOTE(review): M_PI/4 presumably converts percent-off-center to radians via the camera FOV -- confirm
+      printf("%g ",angle_goal);
+      printf("%g\n",gyro->angle);
+
+      target_angle.MakeWithBuilder()
+          .target_angle(angle_goal).Send();
+    }
+  }
+  ::aos::Cleanup();
+}
+
diff --git a/vision/JPEGRoutines.cpp b/vision/JPEGRoutines.cpp
new file mode 100644
index 0000000..749e6b0
--- /dev/null
+++ b/vision/JPEGRoutines.cpp
@@ -0,0 +1,196 @@
+#include "vision/OpenCVWorkTask.h"
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <string.h>
+
+#include "aos/common/time.h"
+#include "opencv2/opencv.hpp"
+#include "vision/JPEGRoutines.h"
+
+
+
+
+namespace frc971 {
+namespace vision {
+
+
+/* This is also adapted from libjpeg to be used on decompression tables rather than
+ * compression tables as it was originally intended
+ */
+void decompress_add_huff_table (j_decompress_ptr cinfo,
+        JHUFF_TBL **htblptr, const UINT8 *bits, const UINT8 *val)
+/* Define a Huffman table */
+{
+  int nsymbols, len;
+
+  if (*htblptr == NULL)
+    *htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo);
+
+  /* Copy the number-of-symbols-of-each-code-length counts */
+  memcpy((*htblptr)->bits, bits, sizeof((*htblptr)->bits));
+
+  /* Validate the counts. We do this here mainly so we can copy the right
+   * number of symbols from the val[] array, without risking marching off
+   * the end of memory. jchuff.c will do a more thorough test later.
+   */
+  nsymbols = 0;
+  for (len = 1; len <= 16; len++)
+    nsymbols += bits[len];
+  if (nsymbols < 1 || nsymbols > 256){
+    fprintf(stderr,"%s:%d: Error, bad huffman table",__FILE__,__LINE__);
+    exit(-1);
+  }
+
+  memcpy((*htblptr)->huffval, val, nsymbols * sizeof(uint8_t));
+
+}
+
+/* standard_huff_tables is taken from libjpeg compression stuff
+ * and is here used to set up the same tables in the decompression structure.
+ */
+void standard_huff_tables (j_decompress_ptr cinfo)
+  /* Set up the standard Huffman tables (cf. JPEG standard section K.3) */
+  /* IMPORTANT: these are only valid for 8-bit data precision! */
+{
+  static const UINT8 bits_dc_luminance[17] =
+    { /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
+  static const UINT8 val_dc_luminance[] =
+    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+
+  static const UINT8 bits_dc_chrominance[17] =
+    { /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
+  static const UINT8 val_dc_chrominance[] =
+    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+
+  static const UINT8 bits_ac_luminance[17] =
+    { /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d };
+  static const UINT8 val_ac_luminance[] =
+    { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+      0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+      0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+      0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+      0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+      0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+      0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+      0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+      0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+      0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+      0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+      0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+      0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+      0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+      0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+      0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+      0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+      0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+      0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+      0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+      0xf9, 0xfa };
+
+  static const UINT8 bits_ac_chrominance[17] =
+    { /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 };
+  static const UINT8 val_ac_chrominance[] =
+    { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+      0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+      0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+      0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+      0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+      0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+      0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+      0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+      0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+      0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+      0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+      0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+      0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+      0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+      0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+      0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+      0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+      0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+      0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+      0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+      0xf9, 0xfa };
+  /* Install the four standard tables (JPEG Annex K) into the decompressor. */
+  decompress_add_huff_table(cinfo, &cinfo->dc_huff_tbl_ptrs[0],
+      bits_dc_luminance, val_dc_luminance);
+  decompress_add_huff_table(cinfo, &cinfo->ac_huff_tbl_ptrs[0],
+      bits_ac_luminance, val_ac_luminance);
+  decompress_add_huff_table(cinfo, &cinfo->dc_huff_tbl_ptrs[1],
+      bits_dc_chrominance, val_dc_chrominance);
+  decompress_add_huff_table(cinfo, &cinfo->ac_huff_tbl_ptrs[1],
+      bits_ac_chrominance, val_ac_chrominance);
+}
+
+
+
+
+void process_jpeg(unsigned char *out,unsigned char *image,size_t size){  // decode a JPEG frame from memory into the RGB buffer `out`
+  struct jpeg_decompress_struct cinfo;
+  struct jpeg_error_mgr jerr;
+
+  static aos::time::Time timestamp_old = aos::time::Time::Now();
+  aos::time::Time timestamp_start = aos::time::Time::Now();
+
+
+  cinfo.err = jpeg_std_error( &jerr );  // NOTE(review): libjpeg's default error handler exits the process on decode errors -- confirm acceptable
+  cinfo.out_color_space = JCS_RGB;
+  jpeg_create_decompress( &cinfo );
+  //jpeg_stdio_src( &cinfo, infile );
+  jpeg_mem_src(&cinfo,image,size);  // decode directly from the in-memory frame
+
+  jpeg_read_header( &cinfo, TRUE );
+  standard_huff_tables (&cinfo);  // NOTE(review): presumably the camera's MJPEG frames omit Huffman tables -- confirm
+
+
+//  printf( "JPEG File Information: \n" );
+//  printf( "Image width and height: %d pixels and %d pixels.\n", cinfo.image_width, cinfo.image_height );
+//  printf( "Color components per pixel: %d.\n", cinfo.num_components );
+//  printf( "Color space: %d.\n", cinfo.jpeg_color_space );
+  printf("JpegDecompressed\n");
+
+  jpeg_start_decompress( &cinfo );
+
+  int offset = 0;
+  int step = cinfo.num_components * cinfo.image_width;
+  unsigned char *buffers[cinfo.image_height];
+  for (int i = cinfo.image_height - 1; i >= 0; --i) {  // NOTE(review): rows are assigned bottom-up, so the decoded image ends up vertically flipped -- confirm intended
+    buffers[i] = &out[offset];
+    offset += step;
+  }
+
+  while( cinfo.output_scanline < cinfo.image_height )
+  {
+    jpeg_read_scanlines(&cinfo, &buffers[cinfo.output_scanline],
+        cinfo.image_height - cinfo.output_scanline);
+  }
+
+  /* This used to do BGR to RGB conversions inline */
+/*
+  for (int i = 0; i < (int)(cinfo.image_height * cinfo.image_width * 3); i+= 3) {
+    uint8_t b = out[i + 0];
+    uint8_t r = out[i + 2];
+    out[i + 0] = r;
+    out[i + 2] = b;
+  }
+*/
+  jpeg_finish_decompress( &cinfo );
+  jpeg_destroy_decompress( &cinfo );
+
+  aos::time::Time timestamp_end = aos::time::Time::Now();
+
+  double jpeg_part = ((timestamp_end - timestamp_start).nsec()) / 1000000000.0;
+  double longer_part = ((timestamp_end - timestamp_old).nsec()) / 1000000000.0;
+
+  printf("%g %g\n",jpeg_part / longer_part,1.0 / longer_part);  // fraction of frame time spent decoding, and frame rate
+
+  timestamp_old = timestamp_end;
+
+}
+
+} // namespace vision
+} // namespace frc971
+
diff --git a/vision/JPEGRoutines.h b/vision/JPEGRoutines.h
new file mode 100644
index 0000000..5a1dc14
--- /dev/null
+++ b/vision/JPEGRoutines.h
@@ -0,0 +1,17 @@
+
+#include "aos/externals/libjpeg/include/jpeglib.h"  // NOTE(review): this header has no include guard; add FRC971_VISION_JPEG_ROUTINES_H_ like the other headers
+
+
+
+namespace frc971 {
+namespace vision {
+
+void decompress_add_huff_table (j_decompress_ptr cinfo,
+        JHUFF_TBL **htblptr, const UINT8 *bits, const UINT8 *val);
+
+void standard_huff_tables (j_decompress_ptr cinfo);
+
+void process_jpeg(unsigned char *out,unsigned char *image,size_t size);
+
+} // namespace vision
+} // namespace frc971
diff --git a/vision/OpenCVWorkTask.cpp b/vision/OpenCVWorkTask.cpp
new file mode 100644
index 0000000..89f168d
--- /dev/null
+++ b/vision/OpenCVWorkTask.cpp
@@ -0,0 +1,123 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <string.h>
+#include <vector>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include "opencv2/opencv.hpp"
+
+#include "aos/common/time.h"
+#include "aos/atom_code/camera/Buffers.h"
+#include "aos/externals/libjpeg/include/jpeglib.h"
+
+#include "vision/OpenCVWorkTask.h"
+#include "vision/CameraProcessor.h"
+#include "vision/BinaryServer.h"
+#include "vision/PacketNotifier.h"
+#include "vision/JPEGRoutines.h"
+
+
+
+namespace frc971 {
+namespace vision {
+
+} // namespace vision
+} // namespace frc971
+
+void reciever_main(frc971::vision::PacketNotifier *notify){  // NOTE(review): "reciever" misspelling matches PacketNotifier::RegisterReciever; renaming would touch multiple files
+  frc971::vision::BinaryServer test(8020,notify);  // the ctor runs event_base_dispatch() and never returns
+}
+
+#include "frc971/queues/CameraTarget.q.h"
+using frc971::vision::targets;
+
+#include <iostream>
+void sender_main(frc971::vision::PacketNotifier *notify){  // camera loop: decode each frame, find targets, publish the best one
+  ::aos::InitNRT();
+  ::aos::camera::Buffers buffers;
+  CvSize size;
+  size.width = ::aos::camera::Buffers::Buffers::kWidth;
+  size.height = ::aos::camera::Buffers::Buffers::kHeight;
+
+  // create processing object
+  ProcessorData processor(size.width, size.height, false);
+
+  printf("started sender main\n");
+  while(true){
+    //usleep(7500);
+    size_t data_size;
+    timeval timestamp_timeval;
+    const void *image = buffers.GetNext(
+        true, &data_size, &timestamp_timeval, NULL);
+    ::aos::time::Time timestamp(timestamp_timeval);
+
+    //TODO(pschuh): find proper way to cast away const for this: :(
+    // parker you prolly want const_cast<type>(var);
+    printf("\nNew Image Recieved\n");
+    std::cout << timestamp << std::endl;
+    unsigned char *buffer = (unsigned char *)notify->GetBuffer();  // shared-memory slot the receiver process will stream out
+    frc971::vision::process_jpeg(buffer, (unsigned char *)image, data_size);
+    // build the headers on top of the buffer
+    cvSetData(processor.src_header_image,
+        buffer,
+        ::aos::camera::Buffers::Buffers::kWidth * 3);  // NOTE(review): src_header_image is never allocated by ProcessorData -- confirm this is a valid IplImage header
+
+    // transform the buffer into targets
+    processor.threshold(buffer);
+    processor.getContours();
+    processor.filterToTargets();
+
+    // could be used for debug ie drawContours
+    //std::vector<std::vector<cv::Point> > target_contours;
+    //std::vector<std::vector<cv::Point> > best_contours;
+    std::vector<Target>::iterator target_it;
+    Target *best_target = NULL;
+    // run through the targets
+    for(target_it = processor.target_list.begin();
+        target_it != processor.target_list.end(); target_it++){
+      //target_contours.push_back(*(target_it->this_contour));
+      // select the highest target -- NOTE(review): picks the smallest getHeight() value; confirm the comparison direction
+      if (best_target == NULL) {
+        best_target = &*target_it;
+      } else {
+        if (target_it->height < best_target->height) {
+          best_target = &*target_it;
+        }
+      }
+    }
+    // if we found one then send it on
+    if (best_target != NULL) {
+      targets.MakeWithBuilder()
+          .percent_azimuth_off_center(
+              best_target->rect.centroid.y / (double)::aos::camera::Buffers::kHeight - 0.5)
+          .percent_elevation_off_center(
+              best_target->rect.centroid.x / (double)::aos::camera::Buffers::kWidth - 0.5)  // NOTE(review): y feeds azimuth and x feeds elevation -- looks swapped unless the camera is mounted rotated 90 degrees; confirm
+          .timestamp(timestamp.ToNSec())
+          .Send();
+    }
+    notify->Notify();  // wake the receiver process to stream the finished buffer
+  }
+  ::aos::Cleanup();
+}
+
+
+// Entry point: fork into a sender (camera/OpenCV) process and a receiver
+// (BinaryServer) process that share buffers through the PacketNotifier.
+int main(int /*argc*/, char** /*argv*/){
+  frc971::vision::PacketNotifier *notify = frc971::vision::PacketNotifier::MMap(
+      ::aos::camera::Buffers::kHeight * ::aos::camera::Buffers::kWidth * 3);
+  pid_t pid = fork();
+  if(pid < 0){
+    fprintf(stderr,"%s:%d: Error in fork()\n",__FILE__,__LINE__); exit(-1);  // was falling through and running as the receiver with no sender alive
+  }
+  if(pid == 0){
+    notify->RegisterSender();
+    sender_main(notify);
+  }else{
+    notify->RegisterReciever();
+    reciever_main(notify);
+  }
+}
+
diff --git a/vision/OpenCVWorkTask.h b/vision/OpenCVWorkTask.h
new file mode 100644
index 0000000..3531b68
--- /dev/null
+++ b/vision/OpenCVWorkTask.h
@@ -0,0 +1,18 @@
+#ifndef FRC971_VISION_OPENCV_WORK_TASK_H_
+#define FRC971_VISION_OPENCV_WORK_TASK_H_
+#include "event2/buffer.h"
+#include "event2/event.h"
+#include "event2/listener.h"
+#include "event2/bufferevent.h"
+#include "aos/common/mutex.h"
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+namespace frc971 {
+namespace vision {
+
+// NOTE(review): intentionally empty -- this header currently exists only to
+// pull in the libevent/aos dependencies for OpenCVWorkTask.cpp.
+} // namespace vision
+} // namespace frc971
+
+#endif // FRC971_VISION_OPENCV_WORK_TASK_H_
diff --git a/vision/PacketNotifier.cpp b/vision/PacketNotifier.cpp
new file mode 100644
index 0000000..ca62f5f
--- /dev/null
+++ b/vision/PacketNotifier.cpp
@@ -0,0 +1,78 @@
+#include "vision/PacketNotifier.h"
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <string.h>
+
+namespace frc971 {
+namespace vision {
+
+// Called in the sender process after fork(): closes the reciever's end of
+// the socketpair so this process keeps only fd[0] (written by Notify()).
+void PacketNotifier::RegisterSender(){
+ close(fd[1]);
+}
+// Called in the reciever process after fork(): closes the sender's end so
+// this process keeps only fd[1] (exposed via RecieverFD()).
+void PacketNotifier::RegisterReciever(){
+ close(fd[0]);
+}
+// Creates the shared PacketNotifier in an anonymous shared mmap so that
+// both processes created by the caller's later fork() see the same state.
+// Memory layout: [PacketNotifier][buffer 0][buffer 1][buffer 2], rounded
+// up to a whole number of 4096-byte pages.
+PacketNotifier *PacketNotifier::MMap(size_t data_size){
+  PacketNotifier *data;
+  data = (PacketNotifier *)mmap(NULL, ((data_size * 3 + 4095 + sizeof(PacketNotifier)) / 4096) * 4096,
+      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+  // Previously unchecked: a failed mmap returns MAP_FAILED (not NULL) and
+  // the following stores would have scribbled through a bogus pointer.
+  if (data == MAP_FAILED) {
+    perror("PacketNotifier mmap");
+    exit(EXIT_FAILURE);
+  }
+  data->data_size = data_size;
+  // The socketpair is how Notify() wakes the reciever's event loop.
+  if (socketpair(PF_LOCAL, SOCK_STREAM, 0, data->fd) == -1) {
+    perror("PacketNotifier socketpair");
+    exit(EXIT_FAILURE);
+  }
+  for(int i = 0;i < 3;i++){
+    data->buffs[i] = (uint8_t *)data + (sizeof(PacketNotifier) + i * data_size);
+  }
+  // aos::Mutex lacks a placement new, so force the underlying lock int to
+  // its unlocked (zero) state by hand.
+  void *place = &(data->mutex);
+  *((int *)place) = 0; // The Mutex class needs a placement new operator. (you know, keep the masses happy);
+  data->in_flight = false;
+  data->to_send = -1;
+  data->sending = -1;
+  data->filling = -1; // previously left uninitialized
+  return data;
+}
+// Hands the sender a buffer that is safe to decompress into: one that is
+// neither currently being sent nor queued to send.  With three buffers and
+// at most two in those states a free one must exist; if not, internal
+// state is corrupt and we bail out hard.
+void *PacketNotifier::GetBuffer(){
+ mutex.Lock();
+ for(int i = 0; i < 3 ; i++){ //search for open spot.
+ if(i != sending && i != to_send){ //open
+ filling = i;
+ mutex.Unlock();
+ printf("leasing out to fill buff # %d\n",i);
+ return buffs[i];
+ }
+ }
+ mutex.Unlock();
+ printf("Error in the fabric of the universe\n");
+ exit(-42);
+}
+// Publishes the buffer most recently handed out by GetBuffer() as the next
+// frame to send, then pokes the reciever process by writing one byte into
+// the socketpair (the reciever watches fd[1] in its event loop).
+void PacketNotifier::Notify(){
+ mutex.Lock();
+ to_send = filling;
+ filling = -1;
+ mutex.Unlock();
+ // wall error
+ // The empty if() only silences -Wunused-result on write(); the wakeup is
+ // best-effort.
+ if(write(fd[0],"\n",1)) {}
+}
+
+// Callback for when the network layer is done with the buffer it was
+// sending: frees up the "sending" slot so GetBuffer() can reuse it.
+// NOTE(review): in_flight is written outside the mutex here and read
+// without it in GetData(); presumably safe because only the reciever
+// process touches in_flight -- confirm.
+void PacketNotifier::DataSent(const void * /*data*/, size_t /*datalen*/){
+ printf("packet_sent: %d; fill: %d; to_send: %d \n",sending,filling,to_send);
+ mutex.Lock();
+ sending = -1;
+ mutex.Unlock();
+ in_flight = false;
+}
+// Fetches the frame queued by Notify() for transmission.  Returns false if
+// a send is already in flight or no new frame is ready; on success hands
+// back the buffer pointer/length and marks that buffer as in flight until
+// DataSent() fires.
+bool PacketNotifier::GetData(char **place_to_put,size_t *length){
+  if(in_flight) return false;
+  mutex.Lock();
+  // Nothing queued (or the last frame was already claimed); without this
+  // check we would hand out buffs[-1].
+  if(to_send < 0){
+    mutex.Unlock();
+    return false;
+  }
+  *length = data_size;
+  *place_to_put = (char *)buffs[to_send];
+  sending = to_send;
+  to_send = -1;
+  mutex.Unlock();
+  in_flight = true;
+  return true;
+}
+
+} // namespace vision
+} // namespace frc971
+
diff --git a/vision/PacketNotifier.h b/vision/PacketNotifier.h
new file mode 100644
index 0000000..7aff7fa
--- /dev/null
+++ b/vision/PacketNotifier.h
@@ -0,0 +1,45 @@
+#ifndef FRC971_VISION_PACKET_NOTIFIER_H_
+#define FRC971_VISION_PACKET_NOTIFIER_H_
+#include "event2/buffer.h"
+#include "event2/event.h"
+#include "event2/listener.h"
+#include "event2/bufferevent.h"
+#include "aos/common/mutex.h"
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+namespace frc971 {
+namespace vision {
+
+/* This class lives in shared memory (using an anonymous mmap) to transfer data between
+ * the server process and the image processing process.
+ */
+struct PacketNotifier{
+ // Protects to_send/filling/sending, which both processes mutate.
+ aos::Mutex mutex;
+ // Socketpair used to wake the reciever: the sender writes fd[0] in
+ // Notify(), the reciever polls fd[1].
+ int fd[2];
+ //3 things can be happening:
+ //something waiting to be sent, something sending, and something getting filled (decompressed to)
+ void *buffs[3];
+ // Indices into buffs for each role above; -1 means "no buffer".
+ int to_send;
+ int filling;
+ int sending;
+ // True from GetData() until DataSent(); only one send at a time.
+ bool in_flight;
+ // Size in bytes of each of the three buffers.
+ size_t data_size;
+ // Sender side: queue the just-filled buffer and wake the reciever.
+ void Notify();
+ // Each process calls its Register* once after fork() to drop the
+ // socketpair end it does not use.
+ void RegisterSender();
+ void RegisterReciever();
+ // FD the reciever's event loop should watch for frame notifications.
+ int RecieverFD(){ return fd[1]; }
+ // Builds the shared-memory instance; call once, before fork().
+ static PacketNotifier *MMap(size_t data_size);
+ // Reciever side: buffer finished transmitting / fetch next buffer.
+ void DataSent(const void * /*data*/, size_t /*datalen*/);
+ void *GetBuffer();
+ // Trampoline so C callback APIs can invoke DataSent on an instance.
+ static void StaticDataSent(const void *data, size_t datalen, void *self){
+ ((PacketNotifier *)(self))->DataSent(data,datalen);
+ }
+ bool GetData(char **place_to_put,size_t *length);
+};
+} // namespace vision
+} // namespace frc971
+
+
+#endif //FRC971_VISION_PACKET_NOTIFIER_H_
diff --git a/vision/RingBuffer.h b/vision/RingBuffer.h
new file mode 100644
index 0000000..3041828
--- /dev/null
+++ b/vision/RingBuffer.h
@@ -0,0 +1,65 @@
+#ifndef _FRC971_VISION_RINGBUFFER_H_
+#define _FRC971_VISION_RINGBUFFER_H_
+#include <stdio.h>
+#include <stdlib.h>
+namespace frc971{
+namespace vision{
+
+// Fixed-size (256 entry) ring of (time, value) samples supporting
+// interpolated lookup by time.  T must support operator- yielding a type
+// with FloatingPointSec(); V must support +, -, and scaling by a double.
+template<class T,class V>
+class RingBuffer{
+  // Record class to hold one timestamped sample.
+  class Samples{
+   public:
+    T time;
+    V value;
+  };
+  Samples *samples;
+  int current_;  // index of the most recent sample (mod 256)
+  int wraps_;    // number of times current_ has wrapped around
+  V value_at(int index){
+    return samples[index & 255].value;
+  }
+  T time_at(int index){
+    return samples[index & 255].time;
+  }
+  // Not copyable: this object owns a heap buffer, and a shallow copy would
+  // double-delete it (declared but unimplemented, C++03 style).
+  RingBuffer(const RingBuffer&);
+  RingBuffer &operator=(const RingBuffer&);
+ public:
+  RingBuffer() : current_(0), wraps_(0) {
+    // new[] (not malloc) so that T and V constructors actually run for
+    // non-POD sample types.
+    samples = new Samples[256];
+  }
+  ~RingBuffer(){
+    // The buffer was previously leaked; it is owned by this object.
+    delete[] samples;
+  }
+  // Adds a sample into the ringbuffer, overwriting the oldest entry.
+  void Sample(T time,V val){
+    current_ += 1;
+    wraps_ += current_ / 256;
+    current_ = current_ % 256;
+    samples[current_].time = time;
+    samples[current_].value = val;
+  }
+  // Binary search for the two samples bracketing `time`, then linearly
+  // interpolate between them.
+  // NOTE(review): a `time` outside the stored range extrapolates off the
+  // nearest pair, and two identical timestamps would divide by zero --
+  // callers are assumed to feed strictly increasing times.
+  V ValueAt(T time){
+    int start = current_ - 255;
+    int end = current_;
+    if(start < 0 && !wraps_){
+      // Buffer not full yet: valid data only lives in [0, current_].
+      start = 0;
+    }
+    int max = end;
+    int min = start;
+    while(end - start > 1){
+      int mid = (start + end) / 2;
+      Samples check = samples[mid & 255];
+      if(check.time < time){
+        start = mid;
+        min = mid;
+      }else{
+        max = mid;
+        end = mid;
+      }
+    }
+    return value_at(min) + (value_at(max) - value_at(min)) *
+        ((time - time_at(min)).FloatingPointSec()/(time_at(max) - time_at(min)).FloatingPointSec());
+  }
+};
+}; // vision
+}; // frc971
+#endif // _FRC971_VISION_RINGBUFFER_H_
diff --git a/vision/SensorProcessor.cpp b/vision/SensorProcessor.cpp
new file mode 100644
index 0000000..1264a20
--- /dev/null
+++ b/vision/SensorProcessor.cpp
@@ -0,0 +1,50 @@
+
+#include "SensorProcessor.h"
+#include <stdio.h>
+
+// Given a table of (x, fx) calibration pairs sorted by x, finds the pair
+// of points bracketing `value` and linearly interpolates between them.
+// Values off either end of the table are extrapolated along the nearest
+// segment.  If no bracketing segment is found (which should never happen
+// for a sorted table), returns -1.
+double interpolate(int num_interp_vals,
+                   const Interpolation *interp, double value) {
+  const int last = num_interp_vals - 1;
+  if (value < interp[0].x) {
+    // Below the table: extend the first segment's line to the left.
+    double slope = (interp[1].fx - interp[0].fx) /
+        (interp[1].x - interp[0].x);
+    return slope * (value - interp[0].x) + interp[0].fx;
+  }
+  if (value > interp[last].x) {
+    // Above the table: extend the last segment's line to the right.
+    double slope = (interp[last].fx - interp[last - 1].fx) /
+        (interp[last].x - interp[last - 1].x);
+    return slope * (value - interp[last - 1].x) + interp[last - 1].fx;
+  }
+  // In range: locate the bracketing segment and blend its endpoints.
+  for (int i = 0; i < last; i++) {
+    if (value >= interp[i].x && value <= interp[i + 1].x) {
+      double lambda =
+          (value - interp[i].x) / (interp[i + 1].x - interp[i].x);
+      return (1 - lambda) * interp[i].fx + lambda * interp[i + 1].fx;
+    }
+  }
+  // This should maybe be an assert.
+  return -1;
+}
+
diff --git a/vision/SensorProcessor.h b/vision/SensorProcessor.h
new file mode 100644
index 0000000..b5ad65c
--- /dev/null
+++ b/vision/SensorProcessor.h
@@ -0,0 +1,28 @@
+
+#ifndef _SENSOR_PROCESSOR_H_
+#define _SENSOR_PROCESSOR_H_
+
+// Maps a single calibration point x to its measured value f(x).
+typedef struct {
+ double x;
+ double fx;
+} Interpolation;
+
+// Calibration tables mapping a target's pixel offset to a distance in
+// inches, measured at 320x240 and 640x480 capture resolutions respectively.
+const Interpolation pixel_to_dist[4] = {
+ {43.0, 495.0},
+ {98.0, 260.0},
+ {145.75, 174.0},
+ {216.75, 110.0}};
+const Interpolation pixel_to_dist640x480[4] = {
+ {86.0, 495.0},
+ {196.0, 260.0},
+ {291.5, 176.0},
+ {433.5, 110.0}};
+
+// Piecewise-linear lookup into one of the tables above; see
+// SensorProcessor.cpp for the off-the-ends extrapolation behavior.
+double interpolate(int num_interp_vals,
+ const Interpolation *interp, double value);
+
+#endif //_SENSOR_PROCESSOR_H_
+
diff --git a/vision/bmp_stream/bmp.rb b/vision/bmp_stream/bmp.rb
new file mode 100644
index 0000000..122c9de
--- /dev/null
+++ b/vision/bmp_stream/bmp.rb
@@ -0,0 +1,11 @@
+class BMPHeader
+ def initialize(width,height)
+ @width = width
+ @height = height
+ end
+ def text()
+ size = @width * @height * 3
+ return ["BM",size + 54,"\0\0\0\0",54,40,@width,
+ @height,1,24,0,size,2835,2835,0,0].pack("a2Ia4IIIIssIIIIII")
+ end
+end
diff --git a/vision/bmp_stream/bmp_dump.rb b/vision/bmp_stream/bmp_dump.rb
new file mode 100644
index 0000000..6c5133c
--- /dev/null
+++ b/vision/bmp_stream/bmp_dump.rb
@@ -0,0 +1,45 @@
+# Connects to the robot's BinaryServer and displays the raw 320x240 BGR
+# frame stream live in a GTK window, printing the achieved FPS to stdout.
+require "socket"
+
+require "bmp"
+# ARGV[0] overrides the default host; 8020 is the raw frame stream port.
+sock = TCPSocket.new(ARGV[0] || "fitpc",8020)
+$width = 640 / 2
+$height = 480 / 2
+# 54-byte BMP header reused for every frame (see bmp.rb).
+$header = BMPHeader.new($width,$height).text()
+
+
+
+require "rubygems"
+require "gtk2"
+
+
+$image = Gtk::Image.new()
+$window = Gtk::Window.new()
+$window.add($image)
+$window.show_all()
+$window.signal_connect("delete-event") { Gtk.main_quit}
+
+# Prime the window with one frame before entering the idle loop.
+loader = Gdk::PixbufLoader.new
+loader.write($header)
+data = sock.read($width * $height * 3)
+loader.last_write(data)
+loader.close
+$image.pixbuf = loader.pixbuf
+$oldtime = Time.now
+i = 0
+# Read and redraw a frame whenever GTK is idle; prints 1/dt as the FPS.
+Gtk.idle_add do
+ loader = Gdk::PixbufLoader.new
+ loader.write($header)
+ data = sock.read($width * $height * 3)
+# (640 * 480).times do |i| #BGR -> RGB
+# b,g,r = data[i * 3 + 0],data[i * 3 + 1],data[i * 3 + 2]
+# data[i * 3 + 0],data[i * 3 + 1],data[i * 3 + 2] = r,g,b
+# end
+ loader.last_write(data)
+ loader.close
+ new_time = Time.now()
+ puts 1 / (new_time - $oldtime)
+ $oldtime = new_time
+ $image.pixbuf = loader.pixbuf
+end
+
+Gtk.main()
diff --git a/vision/bmp_stream/bmp_stream_from_file.rb b/vision/bmp_stream/bmp_stream_from_file.rb
new file mode 100644
index 0000000..3324484
--- /dev/null
+++ b/vision/bmp_stream/bmp_stream_from_file.rb
@@ -0,0 +1,43 @@
+# Plays back a capture made by bmp_stream_to_file.rb: reads raw 320x240 BGR
+# frames from output.bmp_stream and shows one per second in a GTK window.
+require "socket"
+# 54-byte BMP header captured from a real file; prepended to every frame.
+$header = File.open("header.bmp") { |f| f.read(54)}
+
+
+# Read from the recorded file instead of the live socket.
+sock = File.open("output.bmp_stream")
+#TCPSocket.new(ARGV[0] || "fitpc",8020)
+
+
+
+require "rubygems"
+require "gtk2"
+
+
+$image = Gtk::Image.new()
+$window = Gtk::Window.new()
+$window.add($image)
+$window.show_all()
+$window.signal_connect("delete-event") { Gtk.main_quit}
+
+# Prime the window with the first frame.
+loader = Gdk::PixbufLoader.new
+loader.write($header)
+data = sock.read(320 * 240 * 3)
+loader.last_write(data)
+loader.close
+$image.pixbuf = loader.pixbuf
+$oldtime = Time.now
+# Advance one frame per second; prints 1/dt as the effective FPS.
+Gtk.timeout_add(1000) do
+ loader = Gdk::PixbufLoader.new
+ loader.write($header)
+ data = sock.read(320 * 240 * 3)
+# (640 * 480).times do |i| #BGR -> RGB
+# b,g,r = data[i * 3 + 0],data[i * 3 + 1],data[i * 3 + 2]
+# data[i * 3 + 0],data[i * 3 + 1],data[i * 3 + 2] = r,g,b
+# end
+ loader.last_write(data)
+ loader.close
+ new_time = Time.now()
+ puts 1.0 / (new_time - $oldtime)
+ $oldtime = new_time
+ $image.pixbuf = loader.pixbuf
+end
+
+Gtk.main()
diff --git a/vision/bmp_stream/bmp_stream_to_file.rb b/vision/bmp_stream/bmp_stream_to_file.rb
new file mode 100644
index 0000000..962bb2d
--- /dev/null
+++ b/vision/bmp_stream/bmp_stream_to_file.rb
@@ -0,0 +1,20 @@
+# Captures 400 raw 320x240 BGR frames from the robot's frame stream into
+# output.bmp_stream for later offline playback by bmp_stream_from_file.rb.
+require "socket"
+$header = File.open("header.bmp") { |f| f.read(54)}
+
+
+# ARGV[0] overrides the default host; 8020 is the raw frame stream port.
+sock = TCPSocket.new(ARGV[0] || "fitpc",8020)
+
+
+
+require "rubygems"
+require "gtk2"
+
+
+
+File.open("output.bmp_stream","w+") do |file|
+ 400.times do |i|
+ data = sock.read(320 * 240 * 3)
+ file.print(data)
+ puts "frame: #{i}"
+ end
+end
diff --git a/vision/tests/FieldDBGCamProc.cpp b/vision/tests/FieldDBGCamProc.cpp
new file mode 100644
index 0000000..f23b541
--- /dev/null
+++ b/vision/tests/FieldDBGCamProc.cpp
@@ -0,0 +1,210 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include "../CameraProcessor.h"
+#include "../SensorProcessor.h"
+
+#include "opencv2/highgui/highgui.hpp"
+
+// Number of entries in image_names below.
+const int num_names = 39;
+
+
+// Test-harness configuration: process at half resolution with the camera
+// mounted rotated 90 degrees.
+static const bool USE_ROTATED = true;
+static const int use_width = 320;
+static const int use_height = 240;
+// Field calibration shots; filenames encode series/position/distance and
+// camera exposure (e.g. ..._204_e65 = 204 inches, exposure 65).
+const char * image_names[num_names] = {
+ "NASA_bmp/img26_series_b_side_204_e65.jpg",
+ "NASA_bmp/img19_series_b_side_110_e65.jpg",
+ "NASA_bmp/img12_series_b_center_224_e65.jpg",
+ "NASA_bmp/img23_series_b_side_101_e65.jpg",
+ "NASA_bmp/img15_series_b_side_230_e65.jpg",
+ "NASA_bmp/img10_series_b_center_203_e65.jpg",
+ "NASA_bmp/img11_series_b_center_203_e65.jpg",
+ "NASA_bmp/img13_series_b_center_260_e65.jpg",
+ "NASA_bmp/img14_series_b_center_251_e65.jpg",
+ "NASA_bmp/img16_series_b_side_196_e65.jpg",
+ "NASA_bmp/img17_series_b_side_160_e65.jpg",
+ "NASA_bmp/img18_series_b_side_140_e65.jpg",
+ "NASA_bmp/img1_center_200_e65.jpg",
+ "NASA_bmp/img20_series_b_side_114_e65.jpg",
+ "NASA_bmp/img21_series_b_side_137_e65.jpg",
+ "NASA_bmp/img22_center field_e10.jpg",
+ "NASA_bmp/img22_dog Center Field_e10.jpg",
+ "NASA_bmp/img22_series_b_side_150_e65.jpg",
+ "NASA_bmp/img23_center field_e10.jpg",
+ "NASA_bmp/img24_center field_e10.jpg",
+ "NASA_bmp/img24_series_b_side_104_e65.jpg",
+ "NASA_bmp/img25_series_b_side_195_e65.jpg",
+ "NASA_bmp/img27_series_b_side_192_e65.jpg",
+ "NASA_bmp/img28_series_b_side_192_e65.jpg",
+ "NASA_bmp/img29_series_b_side_186_e65.jpg",
+ "NASA_bmp/img2_center_207_e65.jpg",
+ "NASA_bmp/img30_series_b_side_177_e65.jpg",
+ "NASA_bmp/img31_series_b_side_176_e65.jpg",
+ "NASA_bmp/img32_series_b_side_212_e65.jpg",
+ "NASA_bmp/img33_series_b_side_251_e65.jpg",
+ "NASA_bmp/img34_series_b_side_272_e65.jpg",
+ "NASA_bmp/img35_series_b_side_23+219_e65.jpg",
+ "NASA_bmp/img3_center_180_e65.jpg",
+ "NASA_bmp/img4_series_b_center_106_e65.jpg",
+ "NASA_bmp/img5_series_b_center_122_e65.jpg",
+ "NASA_bmp/img6_series_b_center_145_e65.jpg",
+ "NASA_bmp/img7_series_b_center_174_e65.jpg",
+ "NASA_bmp/img8_series_b_center_196_e65.jpg",
+ "NASA_bmp/img9_series_b_center_201_e65.jpg"};
+
+// Window titles (spelling kept as-is: they are runtime identifiers used by
+// cvNamedWindow/cvMoveWindow below).
+const char * WINDOW_NAME = "Treshhold Window";
+const char * WINDOW_NAME2 = "Target Window";
+
+
+// Mouse callback for the target window: prints the clicked pixel's BGR and
+// HSV values (handy for tuning thresholds) and paints the pixel so the
+// click location is visible.
+static void onMouse( int event, int x, int y, int, void* userData ) {
+  if( event != CV_EVENT_LBUTTONDOWN ) return;
+  ProcessorData *proc = (ProcessorData *) userData;
+  IplImage *image = proc->src_header_image;
+  // BUG FIX: the channel offsets were written as 3*(x+1) and 3*(x+2),
+  // which read the *blue* channel of the next two pixels instead of the
+  // green and red channels of the clicked pixel.
+  uchar b = *((uchar*) (image->imageData + y*image->widthStep + 3*x));
+  uchar g = *((uchar*) (image->imageData + y*image->widthStep + 3*x + 1));
+  uchar r = *((uchar*) (image->imageData + y*image->widthStep + 3*x + 2));
+
+  uchar h=0;
+  uchar s=0;
+  uchar v=0;
+  proc->RGBtoHSV(r, g, b, &h, &s, &v);
+
+  // Paint the clicked pixel (BGR = 128,128,255) -- same offset fix here.
+  *((uchar*) (image->imageData + y*image->widthStep + 3*x)) = 128;
+  *((uchar*) (image->imageData + y*image->widthStep + 3*x + 1)) = 128;
+  *((uchar*) (image->imageData + y*image->widthStep + 3*x + 2)) = 255;
+
+  cv::Mat src(image);
+  //cv::imshow("test", src);
+
+  printf("got click (%d,%d)= <%d,%d,%d> -- [%d,%d,%d]\n",
+      x, y, r, g, b, h, s, v);
+}
+
+
+// Offline debug harness: runs the CameraProcessor vision pipeline over a
+// fixed set of field images and visualizes thresholding, contours, and
+// target selection.  Keys: space = next image, g/G = bump the (currently
+// unused) grey threshold, q = quit.
+int main( int argc, char *argv[] ){
+ ProcessorData processor(use_width, use_height, USE_ROTATED);
+ int img_cycle = 0;
+ int thresh = 100;
+
+ cvStartWindowThread();
+
+ cvNamedWindow ("cnt", CV_WINDOW_AUTOSIZE);
+ cvNamedWindow ("GLOBAL", CV_WINDOW_AUTOSIZE);
+ //cvNamedWindow ("Grey Img", CV_WINDOW_AUTOSIZE);
+ //cvNamedWindow ("test", CV_WINDOW_AUTOSIZE);
+ cvNamedWindow (WINDOW_NAME2, CV_WINDOW_AUTOSIZE);
+ cvNamedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE);
+
+ // Tile the windows so they don't overlap on screen.
+ cvMoveWindow(WINDOW_NAME,0,0);
+ cvMoveWindow("GLOBAL",325,0);
+ cvMoveWindow(WINDOW_NAME2,650,0);
+ //cvMoveWindow("Grey Img", 0, 275);
+ //cvMoveWindow("test", 325, 275);
+ cvMoveWindow("cnt",1100,100);
+ //Creating the trackbars
+ // Live HSV threshold tuning sliders, bound directly to processor fields.
+ cvCreateTrackbar("H1","cnt",&processor.h1,360,0);
+ cvCreateTrackbar("H2","cnt",&processor.h2,360,0);
+ cvCreateTrackbar("S1","cnt",&processor.s1,255,0);
+ cvCreateTrackbar("S2","cnt",&processor.s2,255,0);
+ cvCreateTrackbar("V1","cnt",&processor.v1,255,0);
+ cvCreateTrackbar("V2","cnt",&processor.v2,255,0);
+
+ // img_cycle is driven negative by the 'q' key to exit the loop.
+ while (img_cycle >= 0) {
+ processor.clear();
+ printf("%d = %s\n", img_cycle, image_names[img_cycle]);
+ processor.src_header_image = cvLoadImage(image_names[img_cycle]);
+ cvCopy(processor.src_header_image, processor.global_display);
+
+ cv::setMouseCallback( WINDOW_NAME2, onMouse,
+ (void *)&processor );
+
+ cv::Mat global_mat(processor.global_display);
+ cv::Mat src_mat(processor.src_header_image);
+
+ // These lines are the vision processing, the rest of main
+ // is just fluff
+ processor.threshold((uchar *)
+ processor.src_header_image->imageData);
+ processor.getContours();
+ processor.filterToTargets();
+
+ if(!processor.target_list.empty()){
+ std::vector<std::vector<cv::Point> > target_contours;
+ std::vector<std::vector<cv::Point> > best_contours;
+ std::vector<std::vector<cv::Point> > raw_contours;
+ std::vector<Target>::iterator target_it;
+ Target *best_target = NULL;
+ int i = 0;
+ // Collect every target's contours, log its stats, and pick the target
+ // with the greatest height as the "best" one.
+ for(target_it = processor.target_list.begin();
+ target_it != processor.target_list.end(); target_it++){
+ target_contours.push_back(target_it->this_contour);
+ raw_contours.push_back(target_it->raw_contour);
+ printf("%d: h=%.1f, interp=%.1f, <x,y>=<%.1f,%.1f>\n",
+ i++, target_it->height,
+ interpolate(4, &pixel_to_dist[0], target_it->rect.centroid.x),
+ target_it->rect.centroid.x, target_it->rect.centroid.y);
+ if (best_target == NULL) {
+ best_target = &*target_it;
+ } else {
+ if (target_it->height > best_target->height) {
+ best_target = &*target_it;
+ }
+ /* if (processor.is_90) {
+ if (target_it->rect.centroid.x > best_target->rect.centroid.x) {
+ best_target = &*target_it;
+ }
+ } else {
+ if (target_it->rect.centroid.y < best_target->rect.centroid.y) {
+ best_target = &*target_it;
+ }
+ }*/
+ }
+ }
+ best_contours.push_back(best_target->this_contour);
+ //drawContours(global_mat,target_contours,-1,color,CV_FILLED);
+ cv::imshow(WINDOW_NAME, src_mat);
+ //cv::imshow("Grey Img", *processor.grey_mat);
+ // Overlay: red = all targets, pink = best target, green = raw contours.
+ cv::Scalar color(0,0,255);
+ cv::drawContours( src_mat, target_contours, -1, color, CV_FILLED );
+ cv::Scalar color2(128,0,255);
+ cv::drawContours( src_mat, best_contours, -1, color2, CV_FILLED );
+ cv::Scalar color3(0,255,0);
+ cv::drawContours( src_mat, raw_contours, -1, color3, 1 );
+ }
+ //cv::Mat grey_mat(grey_image);
+ //cv::imshow(WINDOW_NAME2, grey_mat);
+ cv::imshow("GLOBAL", global_mat);
+ cv::imshow(WINDOW_NAME2, src_mat);
+ // Wait up to 3 seconds for a keypress; -1 (no key) falls to default.
+ char key = cvWaitKey(3000);
+ switch (key) {
+ case ' ':
+ img_cycle++;
+ img_cycle = img_cycle % num_names;
+ printf("%c %d= %s\n", key, img_cycle, image_names[img_cycle]);
+ break;
+ case 'g':
+ thresh++;
+ thresh = (thresh % 255);
+ printf("+ thresh= %d\n", thresh);
+ break;
+ case 'G':
+ thresh--;
+ thresh = (thresh % 255);
+ printf("- thresh= %d\n", thresh);
+ break;
+ case 'q':
+ img_cycle = -1;
+ break;
+ default:
+ break;
+ }
+ //redraw image cuz we drew all over it
+ }
+
+ cvDestroyWindow(WINDOW_NAME);
+}
+
diff --git a/vision/vision.gyp b/vision/vision.gyp
new file mode 100644
index 0000000..87c0ce1
--- /dev/null
+++ b/vision/vision.gyp
@@ -0,0 +1,33 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'OpenCVWorkTask',
+ 'type': 'executable',
+ 'sources': [
+ 'OpenCVWorkTask.cpp',
+ 'CameraProcessor.cpp',
+ 'BinaryServer.cpp',
+ 'PacketNotifier.cpp',
+ 'JPEGRoutines.cpp',
+ ],
+ 'dependencies': [
+ '<(AOS)/atom_code/atom_code.gyp:init',
+ '<(EXTERNALS):libevent',
+ '<(EXTERNALS):libjpeg',
+ '<(EXTERNALS):opencv',
+ '<(DEPTH)/frc971/queues/queues.gyp:queues',
+ ],
+ },
+ {
+ 'target_name': 'GoalMaster',
+ 'type': 'executable',
+ 'sources': [
+ 'GoalMaster.cpp',
+ ],
+ 'dependencies': [
+ '<(AOS)/atom_code/atom_code.gyp:init',
+ '<(DEPTH)/frc971/queues/queues.gyp:queues',
+ ],
+ },
+ ],
+}