Merge "Allow saving all images in a logfile to a directory"
diff --git a/aos/network/web_proxy.cc b/aos/network/web_proxy.cc
index f4da7d9..5b50072 100644
--- a/aos/network/web_proxy.cc
+++ b/aos/network/web_proxy.cc
@@ -67,6 +67,10 @@
         rtc::CopyOnWriteBuffer(buffer.data(), buffer.size()),
         true /* binary array */);
     for (auto conn : channels_) {
+      if (conn->buffered_amount() > 14000000) {
+        VLOG(1) << "skipping a send because buffered amount is too high";
+        continue;
+      }
       conn->Send(data_buffer);
     }
   }
@@ -160,7 +164,7 @@
   webrtc::DataBuffer data_buffer(
       rtc::CopyOnWriteBuffer(buffer.data(), buffer.size()),
       true /* binary array */);
-  VLOG(2) << "Sending " << buffer.size() << "bytes to a client";
+  VLOG(1) << "Sending " << buffer.size() << " bytes to a client";
   data_channel_->Send(data_buffer);
 }
 
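The new guard drops a frame for any peer whose data channel already has more than 14 MB queued, rather than letting the send buffer grow without bound (an overfull send buffer can cause the channel to fail outright). A minimal sketch of the same backpressure pattern in Python, where `channels` and their `buffered_amount()`/`send()` methods are hypothetical stand-ins for webrtc's C++ `DataChannelInterface` API:

```python
# Hedged sketch: the channel objects are hypothetical stand-ins for
# webrtc's DataChannelInterface; only the backpressure pattern matches.
MAX_BUFFERED_BYTES = 14_000_000  # same threshold as the C++ change


def broadcast(channels, payload: bytes) -> None:
    for channel in channels:
        # Skip slow consumers instead of queueing unboundedly; dropping
        # one frame is cheaper than losing the channel entirely.
        if channel.buffered_amount() > MAX_BUFFERED_BYTES:
            continue
        channel.send(payload)
```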
diff --git a/aos/network/www/proxy.ts b/aos/network/www/proxy.ts
index 9eb0bb6..a675340 100644
--- a/aos/network/www/proxy.ts
+++ b/aos/network/www/proxy.ts
@@ -79,7 +79,7 @@
         'message', (e) => this.onWebSocketMessage(e));
   }
 
-  get config() {
+  getConfig() {
     return this.configInternal;
   }
 
@@ -88,7 +88,7 @@
   onDataChannelMessage(e: MessageEvent): void {
     const fbBuffer = new flatbuffers.ByteBuffer(new Uint8Array(e.data));
     this.configInternal = Configuration.getRootAsConfiguration(fbBuffer);
-    for (handler of this.configHandlers) {
+    for (const handler of Array.from(this.configHandlers)) {
       handler(this.configInternal);
     }
   }
@@ -187,7 +187,7 @@
    * @param builder Finished flatbuffers.Builder containing a Connect message to send.
    */
   sendConnectMessage(builder: any) {
-    const array = builder.assUint8Array();
+    const array = builder.asUint8Array();
     this.dataChannel.send(array.buffer.slice(array.byteOffset));
   }
 }
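The `assUint8Array` → `asUint8Array` fix matters because the typo'd method does not exist on the flatbuffers `Builder`, and the returned `Uint8Array` is a view into a larger backing buffer, which is why the code slices from `byteOffset` before sending. The Python flatbuffers API used elsewhere in this change has the same shape; a minimal sketch, with a placeholder string standing in for a real root table:

```python
import flatbuffers

# Hedged sketch: the string root is only a placeholder so the buffer can
# be finished; real code would finish a table such as a Connect message.
fbb = flatbuffers.Builder(64)
placeholder = fbb.CreateString("connect")
fbb.Finish(placeholder)

# Output() returns only the finished region of the builder's buffer, the
# Python analogue of asUint8Array() plus the byteOffset slice above.
payload = bytes(fbb.Output())
```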
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 194ddd3..0667b16 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -6,7 +6,7 @@
 class CameraIntrinsics:
     def __init__(self):
         self.camera_matrix = []
-        self.distortion_coeffs = []
+        self.dist_coeffs = []
 
     pass
 
@@ -40,7 +40,7 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5, 1))
+web_cam_int.dist_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
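For reference, the renamed `dist_coeffs` field holds OpenCV's five-element distortion vector in the order (k1, k2, p1, p2, k3), so a zero vector models an ideal undistorted lens. A worked sketch with hypothetical calibration values:

```python
import numpy as np

# Hypothetical focal lengths and principal point; real values come from
# calibrating the web cam.
fx, fy = 810.0, 810.0
cx, cy = 320.0, 240.0

camera_matrix = np.asarray([[fx, 0, cx],
                            [0, fy, cy],
                            [0, 0, 1]])
# OpenCV order: k1, k2, p1, p2, k3. All zeros = no lens distortion.
dist_coeffs = np.zeros((5, 1))
```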
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
index 65d1b68..732dbb8 100644
--- a/y2020/vision/tools/python_code/camera_definition_test.py
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -6,7 +6,7 @@
 class CameraIntrinsics:
     def __init__(self):
         self.camera_matrix = []
-        self.distortion_coeffs = []
+        self.dist_coeffs = []
 
 
 class CameraExtrinsics:
@@ -38,7 +38,7 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5, 1))
+web_cam_int.dist_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
index 4fdc2fe..483fe75 100644
--- a/y2020/vision/tools/python_code/camera_param_test.cc
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -254,12 +254,12 @@
       << "Mismatch on camera intrinsic matrix: " << intrinsic << "\nvs.\n"
       << camera_params.camera_intrinsic_matrix_;
 
-  float dist_coeff_mat[5] = {0., 0., 0., 0., 0.};
-  cv::Mat dist_coeff = cv::Mat(5, 1, CV_32F, dist_coeff_mat);
-  cv::Mat dist_coeff_diff = (dist_coeff != camera_params.camera_dist_coeffs_);
-  bool dist_coeff_eq = (cv::countNonZero(dist_coeff_diff) == 0);
-  ASSERT_TRUE(dist_coeff_eq)
-      << "Mismatch on camera distortion coefficients: " << dist_coeff
+  float dist_coeffs_mat[5] = {0., 0., 0., 0., 0.};
+  cv::Mat dist_coeffs = cv::Mat(5, 1, CV_32F, dist_coeffs_mat);
+  cv::Mat dist_coeffs_diff = (dist_coeffs != camera_params.camera_dist_coeffs_);
+  bool dist_coeffs_eq = (cv::countNonZero(dist_coeffs_diff) == 0);
+  ASSERT_TRUE(dist_coeffs_eq)
+      << "Mismatch on camera distortion coefficients: " << dist_coeffs
       << "\nvs.\n"
       << camera_params.camera_dist_coeffs_;
 
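The renamed test keeps the usual OpenCV idiom for exact matrix equality: elementwise `!=` yields a mask, and the matrices match iff the mask has no nonzero entries (cv::Mat's operator== returns a mask, not a bool). The same check in numpy, as used on the Python side of this tooling:

```python
import numpy as np

expected = np.zeros((5, 1), dtype=np.float32)
actual = np.zeros((5, 1), dtype=np.float32)  # stand-in for loaded coeffs

# Elementwise != gives a mask; equality means the mask is all zeros,
# mirroring cv::countNonZero(dist_coeffs != camera_dist_coeffs_) == 0.
assert np.count_nonzero(expected != actual) == 0
```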
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 9894ee2..8116fb7 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -216,16 +216,16 @@
 
 # Given 2d and 3d locations, and camera location and projection model,
 # display locations on an image
-def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, distortion_coeffs):
+def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, dist_coeffs):
     # Compute camera location
     # TODO: Warn on bad inliers
     # TODO: Change this to not have to recast to np
     pts_2d_np = np.asarray(np.float32(pts_2d)).reshape(-1, 1, 2)
     pts_3d_np = np.asarray(np.float32(pts_3d)).reshape(-1, 1, 3)
     retval, R, T, inliers = cv2.solvePnPRansac(pts_3d_np, pts_2d_np, cam_mat,
-                                               distortion_coeffs)
+                                               dist_coeffs)
     pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat,
-                                               distortion_coeffs)
+                                               dist_coeffs)
     if inliers is None:
         glog.warn("WARNING: Didn't get any inliers when reprojecting polygons")
         return img
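`visualize_reprojections` first recovers the camera pose from the 2D-3D correspondences with RANSAC PnP, then pushes the 3D points back through that pose to see how well they land on the measured 2D points. A self-contained sketch of that solve-then-reproject pattern with synthetic data (the values are made up; only the call pattern matches the code above):

```python
import cv2
import numpy as np

cam_mat = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
dist_coeffs = np.zeros((5, 1))

# Six synthetic 3D points two meters in front of the camera, imaged
# under a known ground-truth pose (identity rotation, zero translation).
pts_3d = np.float32([[x, y, 2.0] for x in (-0.5, 0.0, 0.5)
                     for y in (-0.3, 0.3)]).reshape(-1, 1, 3)
rvec_gt = np.zeros((3, 1))
tvec_gt = np.zeros((3, 1))
pts_2d, _ = cv2.projectPoints(pts_3d, rvec_gt, tvec_gt, cam_mat,
                              dist_coeffs)

# Recover the pose, then reproject through it; with clean data the
# reprojections land back on the measured points.
retval, R, T, inliers = cv2.solvePnPRansac(pts_3d, pts_2d, cam_mat,
                                           dist_coeffs)
reproj, jac_2d = cv2.projectPoints(pts_3d, R, T, cam_mat, dist_coeffs)
assert retval and np.allclose(reproj, pts_2d, atol=1e-3)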
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index 09d2764..455bc30 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -173,7 +173,7 @@
 
         # Load from camera parameters
         cam_mat = camera_params.camera_int.camera_matrix
-        distortion_coeffs = camera_params.camera_int.distortion_coeffs
+        dist_coeffs = camera_params.camera_int.dist_coeffs
 
         # Create list of matching query point locations
         dst_pts = np.float32([
@@ -184,7 +184,7 @@
         # TODO: May want to supply it with estimated guess as starting point
         # Find offset from camera to original location of camera relative to target
         retval, R_ci_w_estj, T_ci_w_estj, inliers = cv2.solvePnPRansac(
-            src_pts_3d_array, dst_pts, cam_mat, distortion_coeffs)
+            src_pts_3d_array, dst_pts, cam_mat, dist_coeffs)
 
         tf = time.monotonic()
         print("Solving Pose took ", tf - ts, " secs")
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index bf58e5f..0ce70b7 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -161,13 +161,12 @@
             fbb.PrependFloat32(n)
         intrinsics_vector = fbb.EndVector(len(camera_int_list))
 
-        dist_coeff_list = camera_calib.camera_int.distortion_coeffs.ravel(
-        ).tolist()
+        dist_coeffs_list = camera_calib.camera_int.dist_coeffs.ravel().tolist()
         CameraCalibration.CameraCalibrationStartDistCoeffsVector(
-            fbb, len(dist_coeff_list))
-        for n in reversed(dist_coeff_list):
+            fbb, len(dist_coeffs_list))
+        for n in reversed(dist_coeffs_list):
             fbb.PrependFloat32(n)
-        dist_coeff_vector = fbb.EndVector(len(dist_coeff_list))
+        dist_coeffs_vector = fbb.EndVector(len(dist_coeffs_list))
 
         node_name_offset = fbb.CreateString(camera_calib.node_name)
         CameraCalibration.CameraCalibrationStart(fbb)
@@ -177,7 +176,7 @@
         CameraCalibration.CameraCalibrationAddIntrinsics(
             fbb, intrinsics_vector)
         CameraCalibration.CameraCalibrationAddDistCoeffs(
-            fbb, dist_coeff_vector)
+            fbb, dist_coeffs_vector)
         CameraCalibration.CameraCalibrationAddFixedExtrinsics(
             fbb, fixed_extrinsics_vector)
         camera_calibration_vector.append(
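For context on the loop being renamed: flatbuffers builds vectors back-to-front, which is why each element is prepended in `reversed()` order between the generated `Start...Vector` helper and `EndVector`. The generic pattern, without the generated helpers:

```python
import flatbuffers

dist_coeffs_list = [0.0, 0.0, 0.0, 0.0, 0.0]

fbb = flatbuffers.Builder(0)
# StartVector(element size in bytes, element count, alignment); the
# generated CameraCalibrationStartDistCoeffsVector wraps exactly this.
fbb.StartVector(4, len(dist_coeffs_list), 4)
for n in reversed(dist_coeffs_list):
    fbb.PrependFloat32(n)
# With the flatbuffers version used here, EndVector takes the element
# count and returns the offset of the finished vector.
dist_coeffs_vector = fbb.EndVector(len(dist_coeffs_list))
```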
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index eae358c..fb2f03a 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -384,7 +384,7 @@
                     dtd.visualize_reprojections(
                         img_copy, ideal_pts_tmp, ideal_pts_3d_tmp,
                         camera_params.camera_int.camera_matrix,
-                        camera_params.camera_int.distortion_coeffs)
+                        camera_params.camera_int.dist_coeffs)
 
             for polygon in ideal_target.polygon_list:
                 img_copy = ideal_target.image.copy()
@@ -401,7 +401,7 @@
                     np.asarray(kp_in_poly2d).reshape(-1, 2),
                     np.asarray(kp_in_poly3d).reshape(
                         -1, 3), camera_params.camera_int.camera_matrix,
-                    camera_params.camera_int.distortion_coeffs)
+                    camera_params.camera_int.dist_coeffs)
 
     ###############
     ### Compute 3D points on actual training images
@@ -510,7 +510,7 @@
                 dtd.visualize_reprojections(
                     img_copy, kp_tmp, training_target.keypoint_list_3d,
                     camera_params.camera_int.camera_matrix,
-                    camera_params.camera_int.distortion_coeffs)
+                    camera_params.camera_int.dist_coeffs)
 
     y2020_target_list = training_target_list
     return y2020_target_list
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index f139e71..1def399 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -41,7 +41,7 @@
         # Load image (in color; let opencv convert to B&W for features)
         img_data = cv2.imread(im)
         if img_data is None:
-            glog.error("Failed to load image: ", im)
+            glog.error("Failed to load image: '%s'" % im)
             exit()
         else:
             image_list.append(img_data)
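The logging fix matters because `glog` wraps the stdlib `logging` module, where extra positional arguments are treated as %-format parameters; with no `%s` in the message, the old comma form never printed the path and could trigger a formatting error instead. Pre-formatting the message avoids both:

```python
import glog

im = "missing.png"  # hypothetical path

# One pre-formatted string, rather than print()-style comma arguments.
glog.error("Failed to load image: '%s'" % im)
```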
diff --git a/y2020/vision/tools/python_code/transform_projection_test.py b/y2020/vision/tools/python_code/transform_projection_test.py
index 00f68bb..01fe695 100644
--- a/y2020/vision/tools/python_code/transform_projection_test.py
+++ b/y2020/vision/tools/python_code/transform_projection_test.py
@@ -8,7 +8,7 @@
 # Import camera from camera_definition
 camera_params = camera_definition.web_cam_params
 cam_mat = camera_params.camera_int.camera_matrix
-distortion_coeffs = camera_params.camera_int.distortion_coeffs
+dist_coeffs = camera_params.camera_int.dist_coeffs
 
 # Height of camera on robot above ground
 cam_above_ground = 0.5
@@ -48,11 +48,11 @@
 
 pts_proj_2d_cam2, jac_2d = cv2.projectPoints(
     pts_3d_target, R_cam_cam2_gt_mat_inv, T_cam_cam2_gt_inv, cam_mat,
-    distortion_coeffs)
+    dist_coeffs)
 
 # Now, solve for the pose using the original 3d points (pts_3d_T_t) and the projections from the new location
 retval, R_cam2_cam_est, T_cam2_cam_est, inliers = cv2.solvePnPRansac(
-    pts_3d_target, pts_proj_2d_cam2, cam_mat, distortion_coeffs)
+    pts_3d_target, pts_proj_2d_cam2, cam_mat, dist_coeffs)
 
 # This is the pose from camera to original target spot.  We need to invert to get back to the pose we want
 R_cam_cam2_est_mat = cv2.Rodrigues(R_cam2_cam_est)[0].T
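The tail of this test inverts the recovered pose: `cv2.Rodrigues` turns the axis-angle vector into a 3x3 rotation matrix, a rotation's inverse is its transpose, and the inverse of the transform x' = R x + T is x = R^T x' - R^T T. A small self-contained check of that identity with hypothetical values:

```python
import cv2
import numpy as np

rvec = np.array([[0.1], [0.2], [0.3]])  # hypothetical axis-angle rotation
tvec = np.array([[0.5], [0.0], [1.0]])  # hypothetical translation

R = cv2.Rodrigues(rvec)[0]   # 3x3 rotation matrix
R_inv = R.T                  # a rotation's inverse is its transpose
T_inv = -R_inv @ tvec

# Round trip: applying the pose and then its inverse is the identity.
x = np.array([[1.0], [2.0], [3.0]])
assert np.allclose(R_inv @ (R @ x + tvec) + T_inv, x)
```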
diff --git a/y2020/www/image_handler.ts b/y2020/www/image_handler.ts
index ae530ef..e2ba0b9 100644
--- a/y2020/www/image_handler.ts
+++ b/y2020/www/image_handler.ts
@@ -4,6 +4,7 @@
 export class ImageHandler {
   private canvas = document.createElement('canvas');
   private imageBuffer: Uint8ClampedArray|null = null;
+  private image: CameraImage|null = null;
   private imageTimestamp: flatbuffers.Long|null = null;
   private result: ImageMatchResult|null = null;
   private resultTimestamp: flatbuffers.Long|null = null;
@@ -16,6 +17,7 @@
   }
 
   handleImage(data: Uint8Array): void {
+    console.log('got an image to process');
     if (this.imageSkipCount != 0) {
       this.imageSkipCount--;
       return;
@@ -24,24 +26,28 @@
     }
 
     const fbBuffer = new flatbuffers.ByteBuffer(data);
-    const image = CameraImage.getRootAsCameraImage(fbBuffer);
-    this.imageTimestamp = image.monotonicTimestampNs();
+    this.image = CameraImage.getRootAsCameraImage(fbBuffer);
+    this.imageTimestamp = this.image.monotonicTimestampNs();
 
-    this.width = image.cols();
-    this.height = image.rows();
+    this.width = this.image.cols();
+    this.height = this.image.rows();
     if (this.width === 0 || this.height === 0) {
       return;
     }
-    this.imageBuffer = new Uint8ClampedArray(this.width * this.height * 4); // RGBA
 
+    this.draw();
+  }
+
+  convertImage(): void {
+    this.imageBuffer = new Uint8ClampedArray(this.width * this.height * 4); // RGBA
     // Read four bytes (YUYV) from the data and transform into two pixels of
     // RGBA for canvas
     for (let j = 0; j < this.height; j++) {
       for (let i = 0; i < this.width; i += 2) {
-        const y1 = image.data((j * this.width + i) * 2);
-        const u = image.data((j * this.width + i) * 2 + 1);
-        const y2 = image.data((j * this.width + i + 1) * 2);
-        const v = image.data((j * this.width + i + 1) * 2 + 1);
+        const y1 = this.image.data((j * this.width + i) * 2);
+        const u = this.image.data((j * this.width + i) * 2 + 1);
+        const y2 = this.image.data((j * this.width + i + 1) * 2);
+        const v = this.image.data((j * this.width + i + 1) * 2 + 1);
 
         // Based on https://en.wikipedia.org/wiki/YUV#Converting_between_Y%E2%80%B2UV_and_RGB
         const c1 = y1 - 16;
@@ -59,11 +65,10 @@
         this.imageBuffer[(j * this.width + i) * 4 + 7] = 255;
       }
     }
-
-    this.draw();
   }
 
   handleImageMetadata(data: Uint8Array): void {
+    console.log('got an image match result to process');
     const fbBuffer = new flatbuffers.ByteBuffer(data);
     this.result = ImageMatchResult.getRootAsImageMatchResult(fbBuffer);
     this.resultTimestamp = this.result.imageMonotonicTimestampNs();
@@ -74,8 +79,12 @@
     if (!this.imageTimestamp || !this.resultTimestamp ||
         this.imageTimestamp.low !== this.resultTimestamp.low ||
         this.imageTimestamp.high !== this.resultTimestamp.high) {
+      console.log('image and result do not match');
+      console.log(this.imageTimestamp?.low, this.resultTimestamp?.low);
+      console.log(this.imageTimestamp?.high, this.resultTimestamp?.high);
       return;
     }
+    this.convertImage();
     const ctx = this.canvas.getContext('2d');
 
     this.canvas.width = this.width;
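`convertImage` decodes YUYV422, where each four-byte group carries two pixels that share one U and one V sample, using the BT.601 conversion from the Wikipedia section linked in the code (c = Y - 16, d = U - 128, e = V - 128). A numpy sketch of the same conversion for a whole frame, assuming a flat byte array in YUYV order and the fixed-point constants from that cited page:

```python
import numpy as np

def yuyv_to_rgba(yuyv: np.ndarray, width: int, height: int) -> np.ndarray:
    # Each four-byte group Y1 U Y2 V encodes two horizontally adjacent
    # pixels sharing the same chroma samples.
    frame = yuyv.reshape(height, width // 2, 4).astype(np.int32)
    y = frame[:, :, 0::2].reshape(height, width)  # Y1, Y2 interleaved
    u = np.repeat(frame[:, :, 1], 2, axis=1)      # U shared by the pair
    v = np.repeat(frame[:, :, 3], 2, axis=1)      # V shared by the pair

    # Fixed-point BT.601 conversion (constants from the cited page).
    c, d, e = y - 16, u - 128, v - 128
    r = (298 * c + 409 * e + 128) >> 8
    g = (298 * c - 100 * d - 208 * e + 128) >> 8
    b = (298 * c + 516 * d + 128) >> 8

    rgba = np.stack([r, g, b, np.full_like(r, 255)], axis=-1)
    return np.clip(rgba, 0, 255).astype(np.uint8)
```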