Be more robust to arbitrary incoming frames

Previously, bad skews could cause segfaults: a bad skew generated a bad
noise covariance, which produced NaNs, which broke target matching,
which led to invalid array indices.

Test invalid inputs in more places and sanitize inputs more thoroughly.
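
The failure chain is roughly the one sketched below (a standalone
illustration rather than the actual drivetrain code; noise_from_reading
is a hypothetical stand-in for the camera noise model):

    #include <cmath>
    #include <cstdio>

    // cos(NaN) is NaN, so a covariance derived from a NaN skew is also NaN,
    // and every comparison against NaN is false, which can leave a bogus
    // "best match" index in place.
    double noise_from_reading(double skew, double distance) {
      const double apparent_width = std::cos(skew) * 10.0 / distance;
      return 0.1 / apparent_width;  // NaN whenever skew or distance is NaN.
    }

    int main() {
      const double noise = noise_from_reading(std::nan(""), 5.0);
      // Both print 0: NaN compares false either way, so "pick the lowest
      // score" loops can finish without ever selecting a valid index.
      std::printf("%d %d\n", noise < 1.0, noise >= 1.0);
    }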

Change-Id: I5556bf020b7870b7224cf56e6a392d0f59f61fed
diff --git a/y2019/control_loops/drivetrain/camera.h b/y2019/control_loops/drivetrain/camera.h
index 2ab4b4f..d59ac21 100644
--- a/y2019/control_loops/drivetrain/camera.h
+++ b/y2019/control_loops/drivetrain/camera.h
@@ -213,14 +213,18 @@
     // This number is unitless; if it is greater than 1, the target is
     // visible to the camera, and if it is less than 1, it is too small to be
     // registered on the camera.
-    Scalar apparent_width =
-        ::std::max<Scalar>(0.0, ::std::cos(view->reading.skew) *
-                                    noise_parameters_.max_viewable_distance /
-                                    view->reading.distance);
+    const Scalar cosskew = ::std::cos(view->reading.skew);
+    Scalar apparent_width = ::std::max<Scalar>(
+        0.0, cosskew * noise_parameters_.max_viewable_distance /
+                 view->reading.distance);
+    // If we got wildly invalid distance or skew measurements, then set a very
+    // small apparent width.
+    if (view->reading.distance < 0 || cosskew < 0) {
+      apparent_width = 0.01;
+    }
     // As both a sanity check and for the sake of numerical stability, manually
-    // set apparent_width to something "very small" if the distance is too
-    // close.
-    if (view->reading.distance < 0.01) {
+    // set apparent_width to something "very small" if it is near zero.
+    if (apparent_width < 0.01) {
       apparent_width = 0.01;
     }
 
diff --git a/y2019/control_loops/drivetrain/camera_test.cc b/y2019/control_loops/drivetrain/camera_test.cc
index ce1a2a4..d0a4b85 100644
--- a/y2019/control_loops/drivetrain/camera_test.cc
+++ b/y2019/control_loops/drivetrain/camera_test.cc
@@ -154,10 +154,11 @@
   EXPECT_EQ(1u, camera_.target_views().size());
 }
 
+using Reading = TestCamera::TargetView::Reading;
+
 // Checks that reading noises have the expected characteristics (mostly, going
 // up linearly with distance):
 TEST_F(CameraTest, DistanceNoiseTest) {
-  using Reading = TestCamera::TargetView::Reading;
   const Reading normal_noise = camera_.target_views()[0].noise;
   // Get twice as close:
   base_pose_.mutable_pos()->y() /= 2.0;
@@ -169,6 +170,40 @@
   EXPECT_EQ(normal_noise.heading, closer_noise.heading);
 }
 
+class CameraViewParamTest : public CameraTest,
+                            public ::testing::WithParamInterface<Reading> {};
+
+// Checks that invalid or absurd measurements result in large but finite noises
+// and non-visible targets.
+TEST_P(CameraViewParamTest, InvalidReading) {
+  TestCamera::TargetView view;
+  view.reading = GetParam();
+  bool visible = true;
+  camera_.PopulateNoise(&view, &visible);
+  // The target should not be visible.
+  EXPECT_FALSE(visible);
+  // We should end up with finite but very large noises when things are invalid.
+  EXPECT_TRUE(::std::isfinite(view.noise.heading));
+  EXPECT_TRUE(::std::isfinite(view.noise.distance));
+  EXPECT_TRUE(::std::isfinite(view.noise.skew));
+  EXPECT_TRUE(::std::isfinite(view.noise.height));
+  // Don't lower-bound the heading noise, since it is always constant.
+  EXPECT_LT(10, view.noise.distance);
+  EXPECT_LT(10, view.noise.skew);
+  EXPECT_LT(5, view.noise.height);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    InvalidMeasurements, CameraViewParamTest,
+    ::testing::Values(
+        // heading, distance, height, skew
+        Reading({100.0, -10.0, -10.0, -3.0}), Reading({0.0, 1.0, 0.0, -3.0}),
+        Reading({0.0, 1.0, 0.0, 3.0}), Reading({0.0, 1.0, 0.0, 9.0}),
+        Reading({0.0, ::std::numeric_limits<double>::quiet_NaN(), 0.0, 0.0}),
+        Reading({0.0, ::std::numeric_limits<double>::infinity(), 0.0, 0.0}),
+        Reading({0.0, 1.0, 0.0, ::std::numeric_limits<double>::infinity()}),
+        Reading({0.0, 1.0, 0.0, ::std::numeric_limits<double>::quiet_NaN()})));
+
 }  // namespace testing
 }  // namespace control_loops
 }  // namespace y2019
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.cc b/y2019/control_loops/drivetrain/event_loop_localizer.cc
index 981701a..d0737fe 100644
--- a/y2019/control_loops/drivetrain/event_loop_localizer.cc
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.cc
@@ -58,6 +58,16 @@
 void EventLoopLocalizer::HandleFrame(const CameraFrame &frame) {
   // We need to construct TargetView's and pass them to the localizer:
   ::aos::SizedArray<TargetView, kMaxTargetsPerFrame> views;
+  // Note: num_targets and camera are unsigned integers and so don't need to be
+  // checked for < 0.
+  if (frame.num_targets > kMaxTargetsPerFrame) {
+    LOG(ERROR, "Got bad num_targets %d\n", frame.num_targets);
+    return;
+  }
+  if (frame.camera >= cameras_.size()) {
+    LOG(ERROR, "Got bad camera number %d\n", frame.camera);
+    return;
+  }
   for (int ii = 0; ii < frame.num_targets; ++ii) {
     TargetView view;
     view.reading.heading = frame.targets[ii].heading;
diff --git a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
index 3dde98d..142024f 100644
--- a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
+++ b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
@@ -135,10 +135,18 @@
            ++jj) {
         EventLoopLocalizer::TargetView view = target_views[jj];
         ++frame.num_targets;
-        frame.targets[jj].heading = view.reading.heading;
-        frame.targets[jj].distance = view.reading.distance;
-        frame.targets[jj].skew = view.reading.skew;
-        frame.targets[jj].height = view.reading.height;
+        const float nan = ::std::numeric_limits<float>::quiet_NaN();
+        if (send_bad_frames_) {
+          frame.targets[jj].heading = nan;
+          frame.targets[jj].distance = nan;
+          frame.targets[jj].skew = nan;
+          frame.targets[jj].height = nan;
+        } else {
+          frame.targets[jj].heading = view.reading.heading;
+          frame.targets[jj].distance = view.reading.distance;
+          frame.targets[jj].skew = view.reading.skew;
+          frame.targets[jj].height = view.reading.height;
+        }
       }
       camera_delay_queue_.emplace(monotonic_clock::now(), frame);
     }
@@ -183,9 +191,11 @@
       camera_delay_queue_;
 
   void set_enable_cameras(bool enable) { enable_cameras_ = enable; }
+  void set_bad_frames(bool enable) { send_bad_frames_ = enable; }
 
  private:
   bool enable_cameras_ = false;
+  bool send_bad_frames_ = false;
 };
 
 // Tests that no camera updates, combined with a perfect model, results in no
@@ -202,6 +212,20 @@
   VerifyEstimatorAccurate(1e-10);
 }
 
+// Bad camera updates (NaNs) should have no effect.
+TEST_F(LocalizedDrivetrainTest, BadCameraUpdate) {
+  set_enable_cameras(true);
+  set_bad_frames(true);
+  my_drivetrain_queue_.goal.MakeWithBuilder()
+      .controller_type(1)
+      .left_goal(-1.0)
+      .right_goal(1.0)
+      .Send();
+  RunForTime(chrono::seconds(3));
+  VerifyNearGoal();
+  VerifyEstimatorAccurate(1e-10);
+}
+
 // Tests that camera updates with a perfect model result in no errors.
 TEST_F(LocalizedDrivetrainTest, PerfectCameraUpdate) {
   set_enable_cameras(true);
diff --git a/y2019/control_loops/drivetrain/localizer.h b/y2019/control_loops/drivetrain/localizer.h
index 095c8a9..f8849ce 100644
--- a/y2019/control_loops/drivetrain/localizer.h
+++ b/y2019/control_loops/drivetrain/localizer.h
@@ -53,6 +53,11 @@
       return;
     }
 
+    if (!SanitizeTargets(targets)) {
+      LOG(ERROR, "Throwing out targets due to in insane values.\n");
+      return;
+    }
+
     if (t > HybridEkf::latest_t()) {
       LOG(ERROR,
           "target observations must be older than most recent encoder/gyro "
@@ -97,11 +102,36 @@
   // TODO(james): Tune
   static constexpr Scalar kRejectionScore = 1.0;
 
+  // Checks that the incoming targets make some sense--mostly to prevent NaNs
+  // and the like from propagating.
+  bool SanitizeTargets(
+      const ::aos::SizedArray<TargetView, max_targets_per_frame> &targets) {
+    for (const TargetView &view : targets) {
+      const typename TargetView::Reading reading = view.reading;
+      if (!(::std::isfinite(reading.heading) &&
+            ::std::isfinite(reading.distance) &&
+            ::std::isfinite(reading.skew) && ::std::isfinite(reading.height))) {
+        LOG(ERROR, "Got non-finite values in target.\n");
+        return false;
+      }
+      if (reading.distance < 0) {
+        LOG(ERROR, "Got negative distance.\n");
+        return false;
+      }
+      if (::std::abs(::aos::math::NormalizeAngle(reading.skew)) > M_PI_2) {
+        LOG(ERROR, "Got skew > pi / 2.\n");
+        return false;
+      }
+    }
+    return true;
+  }
+
   // Computes the measurement (z) and noise covariance (R) matrices for a given
   // TargetView.
   void TargetViewToMatrices(const TargetView &view, Output *z,
                             Eigen::Matrix<Scalar, kNOutputs, kNOutputs> *R) {
-    *z << view.reading.heading, view.reading.distance, view.reading.skew;
+    *z << view.reading.heading, view.reading.distance,
+        ::aos::math::NormalizeAngle(view.reading.skew);
     // TODO(james): R should account as well for our confidence in the target
     // matching. However, handling that properly requires thinking a lot more
     // about the probabilities.
@@ -273,7 +303,11 @@
       ::std::array<int, max_targets_per_frame> best_frames =
           MatchFrames(scores, best_matches, target_views.size());
       for (size_t ii = 0; ii < target_views.size(); ++ii) {
-        int view_idx = best_frames[ii];
+        const size_t view_idx = best_frames[ii];
+        if (view_idx >= camera_views.size()) {
+          LOG(ERROR, "Somehow, the view scorer failed.\n");
+          continue;
+        }
         const Eigen::Matrix<Scalar, kNOutputs, kNStates> best_H =
             all_H_matrices[ii][view_idx];
         const TargetView best_view = camera_views[view_idx];
@@ -364,6 +398,7 @@
           best_matches,
       int n_views) {
     ::std::array<int, max_targets_per_frame> best_set;
+    best_set.fill(-1);
     Scalar best_score;
     // We start out without having "used" any views/targets:
     ::aos::SizedArray<bool, max_targets_per_frame> used_views;
diff --git a/y2019/control_loops/drivetrain/localizer_test.cc b/y2019/control_loops/drivetrain/localizer_test.cc
index 3b09e64..8b6f7ed 100644
--- a/y2019/control_loops/drivetrain/localizer_test.cc
+++ b/y2019/control_loops/drivetrain/localizer_test.cc
@@ -327,6 +327,8 @@
   ::std::normal_distribution<> normal_;
 };
 
+using ::std::chrono::milliseconds;
+
 // Tests that, when we attempt to follow a spline and use the localizer to
 // perform the state estimation, we end up roughly where we should and that
 // the localizer has a valid position estimate.
@@ -340,15 +342,20 @@
   // we just trigger all the cameras at once, rather than offsetting them or
   // anything.
   const int camera_period = 5; // cycles
-  // The amount of time to delay the camera images from when they are taken.
-  const ::std::chrono::milliseconds camera_latency(50);
+  // The amount of time to delay the camera images from when they are taken, for
+  // each camera.
+  const ::std::array<milliseconds, 4> camera_latencies{
+      {milliseconds(45), milliseconds(50), milliseconds(55),
+       milliseconds(100)}};
 
-  // A queue of camera frames so that we can add a time delay to the data
-  // coming from the cameras.
-  ::std::queue<::std::tuple<
-      ::aos::monotonic_clock::time_point, const TestCamera *,
-      ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>>>
-      camera_queue;
+  // A queue of camera frames for each camera so that we can add a time delay to
+  // the data coming from the cameras.
+  ::std::array<
+      ::std::queue<::std::tuple<
+          ::aos::monotonic_clock::time_point, const TestCamera *,
+          ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>>>,
+      4>
+      camera_queues;
 
   // Start time, for debugging.
   const auto begin = ::std::chrono::steady_clock::now();
@@ -431,34 +438,37 @@
                                      right_enc + Normal(encoder_noise_),
                                      gyro + Normal(gyro_noise_), U, t);
 
-    // Clear out the camera frames that we are ready to pass to the localizer.
-    while (!camera_queue.empty() &&
-           ::std::get<0>(camera_queue.front()) < t - camera_latency) {
-      const auto tuple = camera_queue.front();
-      camera_queue.pop();
-      ::aos::monotonic_clock::time_point t_obs = ::std::get<0>(tuple);
-      const TestCamera *camera = ::std::get<1>(tuple);
-      ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame> views =
-          ::std::get<2>(tuple);
-      localizer_.UpdateTargets(*camera, views, t_obs);
-    }
+    for (size_t cam_idx = 0; cam_idx < camera_queues.size(); ++cam_idx) {
+      auto &camera_queue = camera_queues[cam_idx];
+      // Clear out the camera frames that we are ready to pass to the localizer.
+      while (!camera_queue.empty() && ::std::get<0>(camera_queue.front()) <
+                                          t - camera_latencies[cam_idx]) {
+        const auto tuple = camera_queue.front();
+        camera_queue.pop();
+        ::aos::monotonic_clock::time_point t_obs = ::std::get<0>(tuple);
+        const TestCamera *camera = ::std::get<1>(tuple);
+        ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame> views =
+            ::std::get<2>(tuple);
+        localizer_.UpdateTargets(*camera, views, t_obs);
+      }
 
-    // Actually take all the images and store them in the queue.
-    if (i % camera_period == 0) {
-      for (size_t jj = 0; jj < true_cameras_.size(); ++jj) {
-        const auto target_views = true_cameras_[jj].target_views();
-        ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>
-            pass_views;
-        for (size_t ii = 0;
-             ii < ::std::min(kNumTargetsPerFrame, target_views.size()); ++ii) {
-          TestCamera::TargetView view = target_views[ii];
-          Noisify(&view);
-          pass_views.push_back(view);
+      // Actually take all the images and store them in the queue.
+      if (i % camera_period == 0) {
+        for (size_t jj = 0; jj < true_cameras_.size(); ++jj) {
+          const auto target_views = true_cameras_[jj].target_views();
+          ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>
+              pass_views;
+          for (size_t ii = 0;
+               ii < ::std::min(kNumTargetsPerFrame, target_views.size());
+               ++ii) {
+            TestCamera::TargetView view = target_views[ii];
+            Noisify(&view);
+            pass_views.push_back(view);
+          }
+          camera_queue.emplace(t, &robot_cameras_[jj], pass_views);
         }
-        camera_queue.emplace(t, &robot_cameras_[jj], pass_views);
       }
     }
-
   }
 
   const auto end = ::std::chrono::steady_clock::now();
@@ -604,7 +614,7 @@
                 .finished(),
             /*noisify=*/true,
             /*disturb=*/false,
-            /*estimate_tolerance=*/0.1,
+            /*estimate_tolerance=*/0.15,
             /*goal_tolerance=*/0.5,
         })));