#ifndef Y2019_CONTROL_LOOPS_DRIVETRAIN_CAMERA_H_
#define Y2019_CONTROL_LOOPS_DRIVETRAIN_CAMERA_H_

#include <vector>

#include "aos/containers/sized_array.h"
#include "frc971/control_loops/pose.h"

namespace y2019 {
namespace control_loops {

// Represents a target on the field. Currently just consists of a pose and an
// indicator for whether it is occluded (occlusion is only used by the
// simulator for testing).
// Orientation convention:
// -The absolute position of the pose is the center of the vision target on
//  the field.
// -The yaw of the pose shall be such that the positive X-axis in the Target's
//  frame will be pointed straight through the target--i.e., if you are looking
//  at the target head-on, then you will be facing in the same direction as the
//  positive X-axis.
// E.g., if the Target has a global position of (1, 1, 0) and yaw of pi / 2,
// then it is at position (1, 1, 0) on the field and is oriented so that if
// someone were to stand at (1, 0, 0) and turn themselves to have a yaw of
// pi / 2, they would see the target 1 meter straight ahead of them.
//
// Generally, the position of a target should not change dynamically; if we do
// start having targets that move, we may want to start optimizing certain
// things (e.g., caching the position of the Target--currently, if the Pose of
// a target is in an absolute frame, then calling abs_pos() is inexpensive; if
// that changes, then we would have to recalculate transformations on every
// frame).
template <typename Scalar = double>
class TypedTarget {
 public:
  typedef ::frc971::control_loops::TypedPose<Scalar> Pose;
  TypedTarget(const Pose &pose) : pose_(pose) {}
  Pose pose() const { return pose_; }

  bool occluded() const { return occluded_; }
  void set_occluded(bool occluded) { occluded_ = occluded; }

  // Get a list of points for plotting. These points should be plotted on
  // an x/y plane in the global frame with lines connecting the points.
  // Essentially, this provides a Polygon that is a reasonable representation
  // of a Target.
  // This should not be called from real-time code, as it will probably
  // dynamically allocate memory.
  ::std::vector<Pose> PlotPoints() const {
    // For the actual representation, we will use a triangle such that the
    // base of the triangle corresponds to the surface the target is on.
    // The third point is shown behind the target, so that the user can
    // visually identify which side of the target is the front.
    Pose base1(&pose_, {0, 0.125, 0}, 0);
    Pose base2(&pose_, {0, -0.125, 0}, 0);
    Pose behind(&pose_, {0.05, 0, 0}, 0);
    // Include behind at the start and end to indicate that we want to draw
    // a closed polygon.
    return {behind, base1, base2, behind};
  }

 private:
  Pose pose_;
  bool occluded_ = false;
};  // class TypedTarget

typedef TypedTarget<double> Target;
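
// Illustrative only (not part of the API): the example target described in the
// orientation convention above could be constructed roughly as follows,
// assuming that the Pose constructor used in PlotPoints() also accepts a
// nullptr base to indicate the global frame:
//   Target target(Target::Pose(nullptr, {1.0, 1.0, 0.0}, M_PI / 2));
//   target.PlotPoints();  // Triangle outline of the target, for plotting.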

// Represents a camera that can see targets and provide information about their
// relative positions.
// Note on coordinate systems:
// -The camera's Pose shall be such that the camera frame's positive X-axis is
//  pointed straight out of the lens (as always, positive Z will be up; we
//  assume that all cameras are mounted level, or compensated for, such that
//  this code won't care).
// -The origin of the camera shall be "at" the camera. For this code, we don't
//  care too much about the details of the camera model, so we can just assume
//  that it is an idealized pinhole camera with the pinhole being the location
//  of the camera.
//
// Template parameters:
// -num_targets: The number of targets on the field; this should be the same
//  for all the actual cameras on the robot (although it may change in tests).
// -Scalar: The floating point type to use (double vs. float).
// -num_obstacles: The number of obstacles on the field to account for; like
//  the number of targets, it should be consistent across actual cameras,
//  although for simulation we may add extra obstacles for testing.
template <int num_targets, int num_obstacles, typename Scalar = double>
class TypedCamera {
 public:
  typedef ::frc971::control_loops::TypedPose<Scalar> Pose;
  typedef ::frc971::control_loops::TypedLineSegment<Scalar> LineSegment;

  // TargetView contains the information associated with a sensor reading
  // from the camera--the readings themselves and noise values, *from the
  // Camera's perspective*, for each reading.
  // Note that the noise terms only account for the inaccuracy you expect to
  // get due to visual noise, pixel-level resolution, etc. They do not account
  // for the fact that, e.g., there is noise in the Pose of the robot which
  // can translate into noise in the target reading.
  // The noise terms are standard deviations, and so have units identical
  // to those of the actual readings.
  struct TargetView {
    struct Reading {
      // The heading as reported from the camera; zero = straight ahead,
      // positive = target in the left half of the image.
      Scalar heading;  // radians
      // The distance from the camera to the target.
      Scalar distance;  // meters
      // Height of the target from the floor.
      Scalar height;  // meters
      // The angle of the target relative to the frame of the camera.
      Scalar skew;  // radians
    };
    Reading reading;
    Reading noise;

    // The target that this view corresponds to.
    const TypedTarget<Scalar> *target;
  };

  // Important parameters for dealing with camera noise calculations.
  // Ultimately, this should end up coming from the constants file.
  struct NoiseParameters {
    // The maximum distance from which we can see a target head-on (when the
    // target is not head-on, we adjust for that).
    Scalar max_viewable_distance;  // meters

    // All noises are standard deviations of the noise, assuming an ~normal
    // distribution.

    // Noise in the heading measurement, which should be constant regardless of
    // other factors.
    Scalar heading_noise;  // radians
    // Noise in the distance measurement when the target is 1m away and head-on
    // to us. This is adjusted by assuming the noise is proportional to the
    // apparent width of the target (because the target also has height, this
    // may not be strictly correct).
    // TODO(james): Is this a good model? It should be reasonable, but there
    // may be more complexity somewhere.
    Scalar nominal_distance_noise;  // meters
    // The noise in the skew measurement when the target is 1m away and head-on
    // to us. Calculated in the same manner with the same caveats as the
    // distance noise.
    Scalar nominal_skew_noise;  // radians
    // Noise in the height measurement, same rules as for skew and distance.
    // TODO(james): Figure out how much noise we will actually get in the
    // height, since there will be extremely low resolution on it.
    Scalar nominal_height_noise;  // meters
  };
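
  // A minimal sketch of what a NoiseParameters instance might look like (the
  // values here are placeholders for illustration only; the real numbers
  // should come from the constants file):
  //   NoiseParameters noise{/*max_viewable_distance=*/10.0,
  //                         /*heading_noise=*/0.02,
  //                         /*nominal_distance_noise=*/0.06,
  //                         /*nominal_skew_noise=*/0.1,
  //                         /*nominal_height_noise=*/0.01};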

  // Creates a camera:
  // pose: The Pose of the camera, which should be based (at least
  //   transitively) off of the robot's Pose.
  // fov: The field-of-view of the camera, in radians. Note that this is the
  //   *total* field-of-view in the horizontal plane (left-right), i.e. the
  //   angle from the left edge of the image to the right edge.
  // noise_parameters: The noise model for readings from this camera; see
  //   NoiseParameters above.
  // targets: The list of targets on the field that could be seen by the
  //   camera.
  // obstacles: The list of known obstacles on the field.
  TypedCamera(const Pose &pose, Scalar fov,
              const NoiseParameters &noise_parameters,
              const ::std::array<TypedTarget<Scalar>, num_targets> &targets,
              const ::std::array<LineSegment, num_obstacles> &obstacles)
      : pose_(pose),
        fov_(fov),
        noise_parameters_(noise_parameters),
        targets_(targets),
        obstacles_(obstacles) {}
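
  // Illustrative only: constructing a camera for a field with four targets and
  // two obstacles, mounted at the robot's origin with a 60 degree horizontal
  // field of view (assumes robot_pose, noise, targets, and obstacles are
  // defined elsewhere by the caller):
  //   TypedCamera<4, 2> camera(Pose(&robot_pose, {0.0, 0.0, 0.0}, 0.0),
  //                            M_PI / 3.0, noise, targets, obstacles);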

  // Returns a list of TargetViews for all the currently visible targets.
  // These will contain ground-truth TargetViews, so the readings will be
  // perfect; a pseudo-random number generator should be used to add noise
  // separately for simulation.
  ::aos::SizedArray<TargetView, num_targets> target_views() const {
    ::aos::SizedArray<TargetView, num_targets> views;
    // Because there are num_targets in targets_ and because AddTargetIfVisible
    // adds at most 1 view to views, we should never exceed the size of the
    // SizedArray.
    for (const auto &target : targets_) {
      AddTargetIfVisible(target, &views);
    }
    return views;
  }
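
  // A minimal sketch of how a simulator might perturb the ground-truth views
  // returned above, using the noise terms as standard deviations (assumes the
  // caller provides a std::mt19937 named gen; height and skew would be handled
  // the same way):
  //   auto views = camera.target_views();
  //   for (auto &view : views) {
  //     view.reading.heading +=
  //         ::std::normal_distribution<Scalar>(0.0, view.noise.heading)(gen);
  //     view.reading.distance +=
  //         ::std::normal_distribution<Scalar>(0.0, view.noise.distance)(gen);
  //   }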

  // Returns a list of lists of points for plotting. Each list of points should
  // be plotted as a line; currently, each list is just a pair drawing a line
  // from the camera aperture to the target location.
  // This should not be called from real-time code, as it will probably
  // dynamically allocate memory.
  ::std::vector<::std::vector<Pose>> PlotPoints() const {
    ::std::vector<::std::vector<Pose>> list_of_lists;
    for (const auto &view : target_views()) {
      list_of_lists.push_back({pose_, view.target->pose()});
    }
    return list_of_lists;
  }

  const Pose pose() const { return pose_; }
  Scalar fov() const { return fov_; }

 private:

  // If the specified target is visible from the current camera Pose, adds it
  // to the views array.
  void AddTargetIfVisible(
      const TypedTarget<Scalar> &target,
      ::aos::SizedArray<TargetView, num_targets> *views) const;

  // The Pose of this camera.
  const Pose pose_;
  // Field of view of the camera, in radians.
  const Scalar fov_;

  // Various constants to calculate sensor noise; see the definition of
  // NoiseParameters for more detail.
  const NoiseParameters noise_parameters_;

  // A list of all the targets on the field.
  const ::std::array<TypedTarget<Scalar>, num_targets> targets_;
  // Known obstacles on the field which can interfere with our view of the
  // targets. An "obstacle" is a line segment which we cannot see through; as
  // such, a logical obstacle (e.g., the cargo ship) may consist of many
  // obstacles in this list to account for all of its sides.
  const ::std::array<LineSegment, num_obstacles> obstacles_;
};  // class TypedCamera

template <int num_targets, int num_obstacles, typename Scalar>
void TypedCamera<num_targets, num_obstacles, Scalar>::AddTargetIfVisible(
    const TypedTarget<Scalar> &target,
    ::aos::SizedArray<TargetView, num_targets> *views) const {
  if (target.occluded()) {
    return;
  }

  // Precompute the current absolute pose of the camera, because we will reuse
  // it a bunch.
  const Pose camera_abs_pose = pose_.Rebase(nullptr);

  const Pose relative_pose = target.pose().Rebase(&camera_abs_pose);
  const Scalar heading = relative_pose.heading();
  const Scalar distance = relative_pose.xy_norm();
  const Scalar skew = ::aos::math::NormalizeAngle(relative_pose.rel_theta());

  // Check whether the target is within the camera's angular field of view.
  if (::std::abs(heading) > fov_ / 2.0) {
    return;
  }

  // Calculate the width of the target as it appears in the image. This number
  // is unitless: a value greater than 1 implies that the target is large
  // enough to be registered by the camera, while a value less than 1 implies
  // it is too small to be registered.
  const Scalar apparent_width = ::std::max(
      Scalar(0.0),
      ::std::cos(skew) * noise_parameters_.max_viewable_distance / distance);

  if (apparent_width < 1.0) {
    return;
  }
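
  // Worked example of the check above (numbers are illustrative only): with
  // max_viewable_distance = 10 m, a head-on target at 5 m gives
  // cos(0) * 10 / 5 = 2.0 (visible), the same target at 12 m gives
  // 10 / 12 ~= 0.83 (too small), and a target at 8 m skewed by 45 degrees
  // gives cos(pi / 4) * 10 / 8 ~= 0.88 (too small).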

  // The final visibility check is for whether there are any obstacles blocking
  // our line of sight.
  for (const auto &obstacle : obstacles_) {
    if (obstacle.Intersects({camera_abs_pose, target.pose()})) {
      return;
    }
  }
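
  // Scale the nominal noise terms by the inverse of the normalized apparent
  // width of the target. Since normalized_width works out to
  // cos(skew) / distance, this means, e.g., that the distance noise grows
  // linearly with distance and with 1 / cos(skew).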

  Scalar normalized_width =
      apparent_width / noise_parameters_.max_viewable_distance;
  Scalar distance_noise =
      noise_parameters_.nominal_distance_noise / normalized_width;
  Scalar skew_noise = noise_parameters_.nominal_skew_noise / normalized_width;
  Scalar height_noise =
      noise_parameters_.nominal_height_noise / normalized_width;

  // If we've made it here, the target is visible to the camera and we should
  // provide the actual TargetView in question.
  TargetView view;
  view.reading = {heading, distance, target.pose().abs_pos().z(), skew};
  view.noise = {noise_parameters_.heading_noise, distance_noise, height_noise,
                skew_noise};
  view.target = &target;
  views->push_back(view);
}

}  // namespace control_loops
}  // namespace y2019

#endif  // Y2019_CONTROL_LOOPS_DRIVETRAIN_CAMERA_H_