Break out Features from the flatbuffer data stream; add dist_coeffs; name cleanup
Passed through yapf, buildifier, clang-format
Send two separate messages: one with detailed features, one without.
Change-Id: I70b2bca2d647cd03e2bc538a9dee68ed8155355a
diff --git a/y2020/vision/sift/demo_sift_training.py b/y2020/vision/sift/demo_sift_training.py
index a6650fd..3fa33cf 100644
--- a/y2020/vision/sift/demo_sift_training.py
+++ b/y2020/vision/sift/demo_sift_training.py
@@ -49,14 +49,14 @@
TrainingImage.TrainingImageStart(fbb)
TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
# TODO(Brian): Fill out the transformation matrices.
- training_image = TrainingImage.TrainingImageEnd(fbb)
+ training_image_offset = TrainingImage.TrainingImageEnd(fbb)
TrainingData.TrainingDataStartImagesVector(fbb, 1)
- fbb.PrependUOffsetTRelative(training_image)
- images = fbb.EndVector(1)
+ fbb.PrependUOffsetTRelative(training_image_offset)
+ images_offset = fbb.EndVector(1)
TrainingData.TrainingDataStart(fbb)
- TrainingData.TrainingDataAddImages(fbb, images)
+ TrainingData.TrainingDataAddImages(fbb, images_offset)
fbb.Finish(TrainingData.TrainingDataEnd(fbb))
bfbs = fbb.Output()
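For reference, a minimal round-trip check of the buffer built above. It assumes flatc's usual generated Python accessors (GetRootAsTrainingData, Images, FeaturesLength) and the same generated-module imports demo_sift_training.py already uses; the helper name is hypothetical.

    def check_training_data(bfbs):
        # Read the root TrainingData table back out of the finished buffer.
        data = TrainingData.TrainingData.GetRootAsTrainingData(bytearray(bfbs), 0)
        assert data.ImagesLength() == 1
        image = data.Images(0)
        # Every feature serialized above should survive the round trip.
        print('features in training image:', image.FeaturesLength())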
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 97c2b0a..8806957 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -60,8 +60,8 @@
}
table TransformationMatrix {
- // The matrix data. This is a row-major 4x4 matrix.
- // In other words, the bottom row is (0, 0, 0, 1).
+ // The matrix data for a row-major 4x4 homogeneous transformation matrix.
+ // This implies the bottom row is (0, 0, 0, 1).
data:[float];
}
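As a quick sanity check on the comment above, a minimal sketch (placeholder values) of what a TransformationMatrix data payload looks like: 16 floats, the row-major flattening of a 4x4 homogeneous transform whose bottom row is (0, 0, 0, 1).

    import numpy as np

    field_to_target = np.eye(4)  # placeholder homogeneous transform
    matrix_data = field_to_target.reshape(16).tolist()  # row-major [float] payload
    assert matrix_data[12:] == [0.0, 0.0, 0.0, 1.0]  # bottom row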
@@ -97,6 +97,9 @@
// rotation around the Z axis by the turret angle
// turret_extrinsics
turret_extrinsics:TransformationMatrix;
+
+  // These are the standard OpenCV 5-parameter distortion coefficients
+  // (k1, k2, p1, p2, k3).
+ dist_coeffs:[float];
}
// Contains the information the EKF wants from an image matched against a single
@@ -128,6 +131,7 @@
// The matches from this image to each of the training images which matched.
// Each member is against the same captured image.
image_matches:[ImageMatch];
+
// The transformations for this image for each of the training images which
// matched.
// TODO(Brian): Include some kind of covariance information for these.
@@ -141,6 +145,10 @@
// Information about the camera which took this image.
camera_calibration:CameraCalibration;
+
+  // 2D image coordinates of the target location in the matched image
+ target_point_x:float;
+ target_point_y:float;
}
root_type ImageMatchResult;
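For context on the new dist_coeffs field: OpenCV's standard 5-parameter model is (k1, k2, p1, p2, k3), and the coefficients are consumed together with the camera intrinsics. A minimal sketch follows; the intrinsics and pixel values are placeholders, not the real calibration.

    import cv2
    import numpy as np

    # Placeholder intrinsics matrix; the real values come from the camera
    # calibration, alongside dist_coeffs.
    camera_matrix = np.array([[400.0, 0.0, 320.0],
                              [0.0, 400.0, 240.0],
                              [0.0, 0.0, 1.0]])
    dist_coeffs = np.zeros(5)  # (k1, k2, p1, p2, k3)

    # Undistort a matched pixel location (e.g. target_point_x/y) into
    # normalized image coordinates.
    point = np.array([[[123.0, 456.0]]], dtype=np.float64)
    normalized = cv2.undistortPoints(point, camera_matrix, dist_coeffs)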
diff --git a/y2020/vision/sift/sift_training.fbs b/y2020/vision/sift/sift_training.fbs
index 7391e76..d4fa740 100644
--- a/y2020/vision/sift/sift_training.fbs
+++ b/y2020/vision/sift/sift_training.fbs
@@ -10,6 +10,10 @@
// from the target to the field. See CameraPose in :sift_fbs for details of
// the conventions of this.
field_to_target:TransformationMatrix;
+
+  // 2D image coordinates of the target location in the training image
+ target_point_x:float;
+ target_point_y:float;
}
// Represents the information used to match incoming images against.
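A sketch of how demo_sift_training.py could populate the new target point fields, assuming flatc generates the usual camel-cased adders (TrainingImageAddTargetPointX/Y; not verified against the generated module). The pixel values are placeholders.

    TrainingImage.TrainingImageStart(fbb)
    TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
    # Hypothetical: pixel location of the target in this training image.
    TrainingImage.TrainingImageAddTargetPointX(fbb, 320.0)
    TrainingImage.TrainingImageAddTargetPointY(fbb, 240.0)
    training_image_offset = TrainingImage.TrainingImageEnd(fbb)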