Merge "Reshuffle Teensy frame queueing code to fix a bug"
diff --git a/aos/input/action_joystick_input.cc b/aos/input/action_joystick_input.cc
index 81e277d..061f4dc 100644
--- a/aos/input/action_joystick_input.cc
+++ b/aos/input/action_joystick_input.cc
@@ -43,7 +43,8 @@
void ActionJoystickInput::StartAuto() {
LOG(INFO, "Starting auto mode\n");
- action_queue_.EnqueueAction(::frc971::autonomous::MakeAutonomousAction(0));
+ action_queue_.EnqueueAction(
+ ::frc971::autonomous::MakeAutonomousAction(GetAutonomousMode()));
}
void ActionJoystickInput::StopAuto() {
diff --git a/aos/input/action_joystick_input.h b/aos/input/action_joystick_input.h
index 87b980e..dca4d39 100644
--- a/aos/input/action_joystick_input.h
+++ b/aos/input/action_joystick_input.h
@@ -40,6 +40,10 @@
void StartAuto();
void StopAuto();
+ // Returns the current autonomous mode which has been selected by robot
+ // inputs.
+ virtual uint32_t GetAutonomousMode() { return 0; }
+
// True if the internal state machine thinks auto is running right now.
bool auto_running_ = false;
// True if an action was running last cycle.
diff --git a/y2019/actors/autonomous_actor.cc b/y2019/actors/autonomous_actor.cc
index 1647f04..41a70fd 100644
--- a/y2019/actors/autonomous_actor.cc
+++ b/y2019/actors/autonomous_actor.cc
@@ -27,10 +27,6 @@
.count();
}
-constexpr bool is_left = false;
-
-constexpr double turn_scalar = is_left ? 1.0 : -1.0;
-
} // namespace
AutonomousActor::AutonomousActor(
@@ -38,7 +34,8 @@
: frc971::autonomous::BaseAutonomousActor(
s, control_loops::drivetrain::GetDrivetrainConfig()) {}
-void AutonomousActor::Reset() {
+void AutonomousActor::Reset(bool is_left) {
+ const double turn_scalar = is_left ? 1.0 : -1.0;
elevator_goal_ = 0.01;
wrist_goal_ = -M_PI / 2.0;
intake_goal_ = -1.2;
@@ -86,8 +83,15 @@
bool AutonomousActor::RunAction(
    const ::frc971::autonomous::AutonomousActionParams &params) {
monotonic_clock::time_point start_time = monotonic_clock::now();
- LOG(INFO, "Starting autonomous action with mode %" PRId32 "\n", params.mode);
- Reset();
+ const bool is_left = params.mode == 0;
+
+ {
+ LOG(INFO, "Starting autonomous action with mode %" PRId32 " %s\n",
+ params.mode, is_left ? "left" : "right");
+ }
+ const double turn_scalar = is_left ? 1.0 : -1.0;
+
+ Reset(is_left);
// Grab the disk, wait until we have vacuum, then jump
set_elevator_goal(0.01);
diff --git a/y2019/actors/autonomous_actor.h b/y2019/actors/autonomous_actor.h
index 82e1aeb..38db070 100644
--- a/y2019/actors/autonomous_actor.h
+++ b/y2019/actors/autonomous_actor.h
@@ -26,7 +26,7 @@
      const ::frc971::autonomous::AutonomousActionParams &params) override;
private:
- void Reset();
+ void Reset(bool is_left);
double elevator_goal_ = 0.0;
double wrist_goal_ = 0.0;
diff --git a/y2019/image_streamer/flip_image.cc b/y2019/image_streamer/flip_image.cc
index 2db20b2..48a18c5 100644
--- a/y2019/image_streamer/flip_image.cc
+++ b/y2019/image_streamer/flip_image.cc
@@ -10,7 +10,9 @@
::cimg_library::CImg<unsigned char> image;
image.load_jpeg_buffer((JOCTET *)(input), input_size);
if (flip) {
- image.mirror("xy");
+ image.rotate(90);
+ } else {
+ image.rotate(270);
}
image.save_jpeg_buffer(buffer, *buffer_size, 80);
diff --git a/y2019/joystick_reader.cc b/y2019/joystick_reader.cc
index 9d7117b..b403913 100644
--- a/y2019/joystick_reader.cc
+++ b/y2019/joystick_reader.cc
@@ -400,6 +400,15 @@
}
private:
+ uint32_t GetAutonomousMode() override {
+ ::frc971::autonomous::auto_mode.FetchLatest();
+ if (::frc971::autonomous::auto_mode.get() == nullptr) {
+ LOG(WARNING, "no auto mode values\n");
+ return 0;
+ }
+ return ::frc971::autonomous::auto_mode->mode;
+ }
+
// Current goals here.
ElevatorWristPosition elevator_wrist_pos_ = kStowPos;
bool grab_piece_ = false;
diff --git a/y2019/vision/server/BUILD b/y2019/vision/server/BUILD
index 180b41e..420731a 100644
--- a/y2019/vision/server/BUILD
+++ b/y2019/vision/server/BUILD
@@ -1,4 +1,5 @@
load("//aos/seasocks:gen_embedded.bzl", "gen_embedded")
+load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library")
load("//aos/downloader:downloader.bzl", "aos_downloader_dir")
load("@build_bazel_rules_typescript//:defs.bzl", "ts_library")
load("@build_bazel_rules_nodejs//:defs.bzl", "rollup_bundle")
@@ -21,25 +22,31 @@
aos_downloader_dir(
name = "www_files",
srcs = [
- "//y2019/vision/server/www:visualizer_bundle",
"//y2019/vision/server/www:files",
+ "//y2019/vision/server/www:visualizer_bundle",
],
dir = "www",
visibility = ["//visibility:public"],
)
+cc_proto_library(
+ name = "server_data_proto",
+ srcs = ["server_data.proto"],
+)
+
cc_binary(
name = "server",
srcs = [
"server.cc",
],
data = [
- "//y2019/vision/server/www:visualizer_bundle",
"//y2019/vision/server/www:files",
+ "//y2019/vision/server/www:visualizer_bundle",
],
visibility = ["//visibility:public"],
deps = [
":gen_embedded",
+ ":server_data_proto",
"//aos:init",
"//aos/logging",
"//aos/time",
diff --git a/y2019/vision/server/server.cc b/y2019/vision/server/server.cc
index 235054b..95ecd8d 100644
--- a/y2019/vision/server/server.cc
+++ b/y2019/vision/server/server.cc
@@ -11,11 +11,13 @@
#include "aos/time/time.h"
#include "frc971/control_loops/drivetrain/drivetrain.q.h"
#include "internal/Embedded.h"
+#include "google/protobuf/util/json_util.h"
#include "seasocks/PrintfLogger.h"
#include "seasocks/Server.h"
#include "seasocks/StringUtil.h"
#include "seasocks/WebSocket.h"
#include "y2019/control_loops/drivetrain/camera.q.h"
+#include "y2019/vision/server/server_data.pb.h"
namespace y2019 {
namespace vision {
@@ -146,7 +148,8 @@
std::chrono::nanoseconds(new_frame.timestamp));
latest_frames[new_frame.camera].targets.clear();
if (new_frame.num_targets > 0) {
- last_target_time[new_frame.camera] = now;
+ last_target_time[new_frame.camera] =
+ latest_frames[new_frame.camera].capture_time;
}
for (int target = 0; target < new_frame.num_targets; ++target) {
latest_frames[new_frame.camera].targets.emplace_back();
@@ -159,40 +162,43 @@
// TODO(james): Use protobuf or the such to generate JSON rather than
// doing so manually.
last_send_time = now;
- std::ostringstream stream;
- stream << "{\n";
-
- stream << "\"robot\": {";
- stream << "\"x\": " << drivetrain_status->x << ",";
- stream << "\"y\": " << drivetrain_status->y << ",";
- stream << "\"theta\": " << drivetrain_status->theta;
- stream << "}\n";
- stream << ",\"target\": {";
- stream << "\"x\": " << drivetrain_status->line_follow_logging.x << ",";
- stream << "\"y\": " << drivetrain_status->line_follow_logging.y << ",";
- stream << "\"theta\": " << drivetrain_status->line_follow_logging.theta
- << ",";
- stream << "\"frozen\": " << drivetrain_status->line_follow_logging.frozen
- << ",";
- stream << "\"have_target\": "
- << drivetrain_status->line_follow_logging.have_target;
- stream << "} ";
-
- stream << ", \"last_target_age\": [";
- bool first = true;
- for (const auto t : last_target_time) {
- if (!first) {
- stream << ",";
- }
- first = false;
- stream << ::std::chrono::duration_cast<::std::chrono::duration<double>>(
- now - t).count();
+ DebugData debug_data;
+ debug_data.mutable_robot_pose()->set_x(drivetrain_status->x);
+ debug_data.mutable_robot_pose()->set_y(drivetrain_status->y);
+ debug_data.mutable_robot_pose()->set_theta(drivetrain_status->theta);
+ {
+ LineFollowDebug *line_debug = debug_data.mutable_line_follow_debug();
+ line_debug->set_frozen(drivetrain_status->line_follow_logging.frozen);
+ line_debug->set_have_target(
+ drivetrain_status->line_follow_logging.have_target);
+ line_debug->mutable_goal_target()->set_x(
+ drivetrain_status->line_follow_logging.x);
+ line_debug->mutable_goal_target()->set_y(
+ drivetrain_status->line_follow_logging.y);
+ line_debug->mutable_goal_target()->set_theta(
+ drivetrain_status->line_follow_logging.theta);
}
- stream << "]";
+ for (size_t ii = 0; ii < latest_frames.size(); ++ii) {
+ CameraDebug *camera_debug = debug_data.add_camera_debug();
+ LocalCameraFrame cur_frame = latest_frames[ii];
- stream << "}";
+ camera_debug->set_current_frame_age(
+ ::std::chrono::duration_cast<::std::chrono::duration<double>>(
+ now - cur_frame.capture_time).count());
+ camera_debug->set_time_since_last_target(
+ ::std::chrono::duration_cast<::std::chrono::duration<double>>(
+ now - last_target_time[ii]).count());
+ for (const auto &target : cur_frame.targets) {
+ Pose *pose = camera_debug->add_targets();
+ pose->set_x(target.x);
+ pose->set_y(target.y);
+ pose->set_theta(target.theta);
+ }
+ }
+ ::std::string json;
+ google::protobuf::util::MessageToJsonString(debug_data, &json);
server->execute(
- std::make_shared<UpdateData>(websocket_handler, stream.str()));
+ std::make_shared<UpdateData>(websocket_handler, ::std::move(json)));
}
}
}
diff --git a/y2019/vision/server/server_data.proto b/y2019/vision/server/server_data.proto
new file mode 100644
index 0000000..d3e5665
--- /dev/null
+++ b/y2019/vision/server/server_data.proto
@@ -0,0 +1,48 @@
+syntax = "proto2";
+
+package y2019.vision;
+
+// Various proto definitions for use with the websocket server for debugging the
+// vision code.
+
+// Representation of a 2D pose on the field, generally referenced off of the
+// center of the driver's station wall with positive X out towards the field and
+// positive Y to the left from the driver's perspective. theta is zero when
+// pointed straight along the positive X axis and increases towards positive Y
+// (counter-clockwise).
+message Pose {
+ optional float x = 1; // meters
+ optional float y = 2; // meters
+ optional float theta = 3; // radians
+}
+
+// Debugging information for the line following.
+message LineFollowDebug {
+ // Pose of the target that we are currently latched onto.
+ optional Pose goal_target = 1;
+ // Whether we are currently in line following mode and so will freeze the
+ // target shortly.
+ optional bool frozen = 2;
+ // Whether we have chosen a target (otherwise, goal_target may be arbitrary).
+ optional bool have_target = 3;
+}
+
+// Data for a single camera at a given instance in time (corresponding to a
+// camera frame).
+message CameraDebug {
+ // The time that has passed, in seconds, since we last got a frame with a
+ // target in it.
+ optional float time_since_last_target = 1;
+  // The age, in seconds, of the most recent frame (the frame containing the targets below).
+ optional float current_frame_age = 2;
+ // Target Pose is relative to the camera, *not* the field, so (0, 0) is at the
+ // camera.
+ repeated Pose targets = 3;
+}
+
+// The overall package of data that we send to the webpage.
+message DebugData {
+ optional Pose robot_pose = 1;
+ optional LineFollowDebug line_follow_debug = 2;
+ repeated CameraDebug camera_debug = 3;
+}
diff --git a/y2019/vision/server/www/generate_camera.cc b/y2019/vision/server/www/generate_camera.cc
index 8e516c3..757ce69 100644
--- a/y2019/vision/server/www/generate_camera.cc
+++ b/y2019/vision/server/www/generate_camera.cc
@@ -15,6 +15,8 @@
out_file << "export const CAMERA_POSES = [\n";
for (size_t ii = 0; ii < constants::Values::kNumCameras; ++ii) {
out_file << " ";
+ // TODO(james): Decide how to manage visualization for practice and code
+ // bots.
DumpPose(&out_file,
GetCamera(CameraSerialNumbers(CompBotTeensyId())[ii])->geometry);
out_file << ",\n";
diff --git a/y2019/vision/server/www/main.ts b/y2019/vision/server/www/main.ts
index a9a57bd..beb2734 100644
--- a/y2019/vision/server/www/main.ts
+++ b/y2019/vision/server/www/main.ts
@@ -28,18 +28,18 @@
reader.addEventListener('loadend', (e) => {
const text = e.srcElement.result;
const j = JSON.parse(text);
- this.x = j.robot.x;
- this.y = j.robot.y;
- this.theta = j.robot.theta;
+ this.x = j.robotPose.x;
+ this.y = j.robotPose.y;
+ this.theta = j.robotPose.theta;
- if(j.target) {
- this.targetLocked = j.target.frozen && j.target.have_target;
- this.targetX = j.target.x;
- this.targetY = j.target.y;
- this.targetTheta = j.target.theta;
+ if(j.lineFollowDebug) {
+ this.targetLocked = j.lineFollowDebug.frozen && j.lineFollowDebug.haveTarget;
+ this.targetX = j.lineFollowDebug.goalTarget.x;
+ this.targetY = j.lineFollowDebug.goalTarget.y;
+ this.targetTheta = j.lineFollowDebug.goalTarget.theta;
}
for (let ii of [0, 1, 2, 3, 4]) {
- if (j.last_target_age[ii] > 0.25) {
+ if (j.cameraDebug[ii].timeSinceLastTarget > 0.25) {
this.cameraColors[ii] = 'red';
} else {
this.cameraColors[ii] = 'green';
diff --git a/y2019/wpilib_interface.cc b/y2019/wpilib_interface.cc
index fd9df73..e75b825 100644
--- a/y2019/wpilib_interface.cc
+++ b/y2019/wpilib_interface.cc
@@ -203,6 +203,11 @@
vacuum_sensor_ = make_unique<frc::AnalogInput>(port);
}
+ // Auto mode switches.
+ void set_autonomous_mode(int i, ::std::unique_ptr<frc::DigitalInput> sensor) {
+ autonomous_modes_.at(i) = ::std::move(sensor);
+ }
+
void RunIteration() override {
{
auto drivetrain_message = drivetrain_queue.position.MakeMessage();
@@ -254,6 +259,18 @@
superstructure_message.Send();
}
+
+ {
+ auto auto_mode_message = ::frc971::autonomous::auto_mode.MakeMessage();
+ auto_mode_message->mode = 0;
+ for (size_t i = 0; i < autonomous_modes_.size(); ++i) {
+ if (autonomous_modes_[i] && autonomous_modes_[i]->Get()) {
+ auto_mode_message->mode |= 1 << i;
+ }
+ }
+ LOG_STRUCT(DEBUG, "auto mode", *auto_mode_message);
+ auto_mode_message.Send();
+ }
}
private:
@@ -262,6 +279,8 @@
::std::unique_ptr<frc::AnalogInput> vacuum_sensor_;
+ ::std::array<::std::unique_ptr<frc::DigitalInput>, 2> autonomous_modes_;
+
::frc971::wpilib::AbsoluteEncoder intake_encoder_;
// TODO(sabina): Add wrist and elevator hall effects.
};
@@ -646,6 +665,9 @@
reader.set_pwm_trigger(true);
reader.set_vacuum_sensor(7);
+ reader.set_autonomous_mode(0, make_unique<frc::DigitalInput>(22));
+  reader.set_autonomous_mode(1, make_unique<frc::DigitalInput>(23));
+
::std::thread reader_thread(::std::ref(reader));
CameraReader camera_reader;