blob: ff53b7b00a2b446aa1e9ed1ec1ff59bc767d947c [file] [log] [blame]
James Kuszmaul57c7c9b2019-01-27 16:16:01 -08001#ifndef Y2019_CONTROL_LOOPS_DRIVETRAIN_CAMERA_H_
2#define Y2019_CONTROL_LOOPS_DRIVETRAIN_CAMERA_H_
3
#include <algorithm>
#include <array>
#include <cmath>
#include <vector>

#include "aos/containers/sized_array.h"
#include "frc971/control_loops/pose.h"
8
9namespace y2019 {
10namespace control_loops {
11
// Represents a target on the field. Currently just consists of a pose and an
// indicator for whether it is occluded (occlusion is only used by the
// simulator for testing).
15// Orientation convention:
16// -The absolute position of the pose is the center of the vision target on the
17// field.
// -The yaw of the pose shall be such that the positive X-axis in the Target's
// frame will be pointed straight through the target--i.e., if you are looking
// at the target head-on, then you will be facing in the same direction as the
// positive X-axis.
22// E.g., if the Target has a global position of (1, 1, 0) and yaw of pi / 2,
23// then it is at position (1, 1, 0) on the field and is oriented so that if
24// someone were to stand at (1, 0, 0) and turn themselves to have a yaw of
25// pi / 2, they would see the target 1 meter straight ahead of them.
26//
27// Generally, the position of a target should not change dynamically; if we do
28// start having targets that move, we may want to start optimizing certain
29// things (e.g., caching the position of the Target--currently, if the Pose of a
30// target is in an absolute frame, then calling abs_pos will be inexpensive; if
31// that changes, then we start having to recalculate transformations on every
32// frame).
33template <typename Scalar = double>
34class TypedTarget {
35 public:
36 typedef ::frc971::control_loops::TypedPose<Scalar> Pose;
37 TypedTarget(const Pose &pose) : pose_(pose) {}
James Kuszmaul090563a2019-02-09 14:43:20 -080038 TypedTarget() {}
James Kuszmaul57c7c9b2019-01-27 16:16:01 -080039 Pose pose() const { return pose_; }
40
41 bool occluded() const { return occluded_; }
42 void set_occluded(bool occluded) { occluded_ = occluded; }
43
44 // Get a list of points for plotting. These points should be plotted on
45 // an x/y plane in the global frame with lines connecting the points.
46 // Essentially, this provides a Polygon that is a reasonable representation
47 // of a Target.
48 // This should not be called from real-time code, as it will probably
49 // dynamically allocate memory.
50 ::std::vector<Pose> PlotPoints() const {
51 // For the actual representation, we will use a triangle such that the
52 // base of the triangle corresponds to the surface the target is on.
53 // The third point is shown behind the target, so that the user can
54 // visually identify which side of the target is the front.
55 Pose base1(&pose_, {0, 0.125, 0}, 0);
56 Pose base2(&pose_, {0, -0.125, 0}, 0);
57 Pose behind(&pose_, {0.05, 0, 0}, 0);
58 // Include behind at the start and end to indicate that we want to draw
59 // a closed polygon.
60 return {behind, base1, base2, behind};
61 }
62
63 private:
64 Pose pose_;
65 bool occluded_ = false;
66}; // class TypedTarget
67
68typedef TypedTarget<double> Target;
69
70// Represents a camera that can see targets and provide information about their
71// relative positions.
72// Note on coordinate systems:
// -The camera's Pose shall be such that the camera frame's positive X-axis is
// pointed straight out of the lens (as always, positive Z will be up; we
// assume that all cameras are mounted level, or compensated for such that this
// code won't care).
77// -The origin of the camera shall be "at" the camera. For this code, I don't
78// think we care too much about the details of the camera model, so we can just
79// assume that it is an idealized pinhole camera with the pinhole being the
80// location of the camera.
81//
82// Template parameters:
83// -num_targets: The number of targets on the field, should be the same for
84// all the actual cameras on the robot (although it may change in tests).
85// -Scalar: The floating point type to use (double vs. float).
86// -num_obstacles: The number of obstacles on the field to account for; like
87// the number of targets, it should be consistent across actual cameras,
88// although for simulation we may add extra obstacles for testing.
89template <int num_targets, int num_obstacles, typename Scalar = double>
90class TypedCamera {
91 public:
92 typedef ::frc971::control_loops::TypedPose<Scalar> Pose;
93 typedef ::frc971::control_loops::TypedLineSegment<Scalar> LineSegment;
94
95 // TargetView contains the information associated with a sensor reading
96 // from the camera--the readings themselves and noise values, *from the
97 // Camera's persective* for each reading.
98 // Note that the noise terms are just accounting for the inaccuracy you
99 // expect to get due to visual noise, pixel-level resolution, etc. These
100 // do not account for the fact that, e.g., there is noise in the Pose of the
101 // robot which can translate into noise in the target reading.
102 // The noise terms are standard deviations, and so have units identical
103 // to that of the actual readings.
104 struct TargetView {
105 struct Reading {
106 // The heading as reported from the camera; zero = straight ahead,
107 // positive = target in the left half of the image.
108 Scalar heading; // radians
109 // The distance from the camera to the target.
110 Scalar distance; // meters
111 // Height of the target from the floor.
112 Scalar height; // meters
113 // The angle of the target relative to the frame of the camera.
114 Scalar skew; // radians
115 };
116 Reading reading;
117 Reading noise;
118
119 // The target that this view corresponds to.
120 const TypedTarget<Scalar> *target;
James Kuszmaul090563a2019-02-09 14:43:20 -0800121 // The Pose the camera was at when viewing the target:
122 Pose camera_pose;
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800123 };
124
125 // Important parameters for dealing with camera noise calculations.
126 // Ultimately, this should end up coming from the constants file.
127 struct NoiseParameters {
128 // The maximum distance from which we can see a target head-on (when the
129 // target is not head-on, we adjust for that).
130 Scalar max_viewable_distance; // meters
131
132 // All noises are standard deviations of the noise, assuming an ~normal
133 // distribution.
134
135 // Noise in the heading measurement, which should be constant regardless of
136 // other factors.
137 Scalar heading_noise; // radians
138 // Noise in the distance measurement when the target is 1m away and head-on
139 // to us. This is adjusted by assuming the noise is proportional to the
140 // apparent width of the target (because the target also has height, this
141 // may not be strictly correct).
142 // TODO(james): Is this a good model? It should be reasonable, but there
143 // may be more complexity somewhere.
144 Scalar nominal_distance_noise; // meters
145 // The noise in the skew measurement when the target is 1m away and head-on
146 // to us. Calculated in the same manner with the same caveats as the
147 // distance noise.
148 Scalar nominal_skew_noise; // radians
149 // Noise in the height measurement, same rules as for skew and distance.
150 // TODO(james): Figure out how much noise we will actually get in the
151 // height, since there will be extremely low resolution on it.
152 Scalar nominal_height_noise; // meters
153 };
154
155 // Creates a camera:
156 // pose: The Pose of the camera, relative to the robot at least transitively.
157 // fov: The field-of-view of the camera, in radians. Note that this is the
158 // *total* field-of-view in the horizontal plane (left-right), so the angle
159 // from the left edge of the image to the right edge.
160 // targets: The list of targets on the field that could be seen by the camera.
161 // obstacles: The list of known obstacles on the field.
162 TypedCamera(const Pose &pose, Scalar fov,
163 const NoiseParameters &noise_parameters,
164 const ::std::array<TypedTarget<Scalar>, num_targets> &targets,
165 const ::std::array<LineSegment, num_obstacles> &obstacles)
166 : pose_(pose),
167 fov_(fov),
168 noise_parameters_(noise_parameters),
169 targets_(targets),
170 obstacles_(obstacles) {}
171
172 // Returns a list of TargetViews for all the currently visible targets.
173 // These will contain ground-truth TargetViews, so the readings will be
174 // perfect; a pseudo-random number generator should be used to add noise
175 // separately for simulation.
176 ::aos::SizedArray<TargetView, num_targets> target_views() const {
177 ::aos::SizedArray<TargetView, num_targets> views;
James Kuszmaul090563a2019-02-09 14:43:20 -0800178 Pose camera_abs_pose = pose_.Rebase(nullptr);
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800179 // Because there are num_targets in targets_ and because AddTargetIfVisible
180 // adds at most 1 view to views, we should never exceed the size of
181 // SizedArray.
182 for (const auto &target : targets_) {
James Kuszmaul090563a2019-02-09 14:43:20 -0800183 AddTargetIfVisible(target, camera_abs_pose, &views);
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800184 }
185 return views;
186 }
187
188 // Returns a list of list of points for plotting. Each list of points should
189 // be plotted as a line; currently, each list is just a pair drawing a line
190 // from the camera aperture to the target location.
191 // This should not be called from real-time code, as it will probably
192 // dynamically allocate memory.
193 ::std::vector<::std::vector<Pose>> PlotPoints() const {
194 ::std::vector<::std::vector<Pose>> list_of_lists;
195 for (const auto &view : target_views()) {
James Kuszmaul090563a2019-02-09 14:43:20 -0800196 list_of_lists.push_back({pose_, view.target->pose()});
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800197 }
198 return list_of_lists;
199 }
200
James Kuszmaul090563a2019-02-09 14:43:20 -0800201 const Pose &pose() const { return pose_; }
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800202 Scalar fov() const { return fov_; }
203
204 private:
205
206 // If the specified target is visible from the current camera Pose, adds it to
207 // the views array.
208 void AddTargetIfVisible(
James Kuszmaul090563a2019-02-09 14:43:20 -0800209 const TypedTarget<Scalar> &target, const Pose &camera_abs_pose,
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800210 ::aos::SizedArray<TargetView, num_targets> *views) const;
211
212 // The Pose of this camera.
213 const Pose pose_;
James Kuszmaul090563a2019-02-09 14:43:20 -0800214
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800215 // Field of view of the camera, in radians.
216 const Scalar fov_;
217
218 // Various constants to calclate sensor noise; see definition of
219 // NoiseParameters for more detail.
220 const NoiseParameters noise_parameters_;
221
222 // A list of all the targets on the field.
James Kuszmaul090563a2019-02-09 14:43:20 -0800223 // TODO(james): Is it worth creating some sort of cache for the targets and
224 // obstacles? e.g., passing around pointer to the targets/obstacles.
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800225 const ::std::array<TypedTarget<Scalar>, num_targets> targets_;
226 // Known obstacles on the field which can interfere with our view of the
227 // targets. An "obstacle" is a line segment which we cannot see through, as
228 // such a logical obstacle (e.g., the cargo ship) may consist of many
229 // obstacles in this list to account for all of its sides.
230 const ::std::array<LineSegment, num_obstacles> obstacles_;
231}; // class TypedCamera
232
233template <int num_targets, int num_obstacles, typename Scalar>
234void TypedCamera<num_targets, num_obstacles, Scalar>::AddTargetIfVisible(
James Kuszmaul090563a2019-02-09 14:43:20 -0800235 const TypedTarget<Scalar> &target, const Pose &camera_abs_pose,
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800236 ::aos::SizedArray<TargetView, num_targets> *views) const {
237 if (target.occluded()) {
238 return;
239 }
240
241 // Precompute the current absolute pose of the camera, because we will reuse
242 // it a bunch.
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800243 const Pose relative_pose = target.pose().Rebase(&camera_abs_pose);
244 const Scalar heading = relative_pose.heading();
245 const Scalar distance = relative_pose.xy_norm();
246 const Scalar skew = ::aos::math::NormalizeAngle(relative_pose.rel_theta());
247
248 // Check if the camera is in the angular FOV.
249 if (::std::abs(heading) > fov_ / 2.0) {
250 return;
251 }
252
253 // Calculate the width of the target as it appears in the image.
254 // This number is unitless and if greater than 1, implies that the target is
255 // visible to the camera and if less than 1 implies it is too small to be
256 // registered on the camera.
257 const Scalar apparent_width =
258 ::std::max(0.0, ::std::cos(skew) *
259 noise_parameters_.max_viewable_distance / distance);
260
261 if (apparent_width < 1.0) {
262 return;
263 }
264
265 // Final visibility check is for whether there are any obstacles blocking or
266 // line of sight.
267 for (const auto &obstacle : obstacles_) {
268 if (obstacle.Intersects({camera_abs_pose, target.pose()})) {
269 return;
270 }
271 }
272
273 Scalar normalized_width =
274 apparent_width / noise_parameters_.max_viewable_distance;
275 Scalar distance_noise =
276 noise_parameters_.nominal_distance_noise / normalized_width;
277 Scalar skew_noise = noise_parameters_.nominal_skew_noise / normalized_width;
278 Scalar height_noise =
279 noise_parameters_.nominal_height_noise / normalized_width;
280
281 // If we've made it here, the target is visible to the camera and we should
282 // provide the actual TargetView in question.
283 TargetView view;
284 view.reading = {heading, distance, target.pose().abs_pos().z(), skew};
285 view.noise = {noise_parameters_.heading_noise, distance_noise, height_noise,
286 skew_noise};
287 view.target = &target;
James Kuszmaul090563a2019-02-09 14:43:20 -0800288 view.camera_pose = camera_abs_pose;
James Kuszmaul57c7c9b2019-01-27 16:16:01 -0800289 views->push_back(view);
290}
291
292} // namespace control_loops
293} // namespace y2019
294
295#endif // Y2019_CONTROL_LOOPS_DRIVETRAIN_CAMERA_H_