Use protobuf to generate websocket JSON
Manually generating the JSON was getting to be painful, so use
protobuf's JSON generator instead.
Change-Id: Id387ae9745de2e744d602f4fbec606f6ba1364bc
diff --git a/y2019/vision/server/BUILD b/y2019/vision/server/BUILD
index 180b41e..420731a 100644
--- a/y2019/vision/server/BUILD
+++ b/y2019/vision/server/BUILD
@@ -1,4 +1,5 @@
load("//aos/seasocks:gen_embedded.bzl", "gen_embedded")
+load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library")
load("//aos/downloader:downloader.bzl", "aos_downloader_dir")
load("@build_bazel_rules_typescript//:defs.bzl", "ts_library")
load("@build_bazel_rules_nodejs//:defs.bzl", "rollup_bundle")
@@ -21,25 +22,31 @@
aos_downloader_dir(
name = "www_files",
srcs = [
- "//y2019/vision/server/www:visualizer_bundle",
"//y2019/vision/server/www:files",
+ "//y2019/vision/server/www:visualizer_bundle",
],
dir = "www",
visibility = ["//visibility:public"],
)
+cc_proto_library(
+ name = "server_data_proto",
+ srcs = ["server_data.proto"],
+)
+
cc_binary(
name = "server",
srcs = [
"server.cc",
],
data = [
- "//y2019/vision/server/www:visualizer_bundle",
"//y2019/vision/server/www:files",
+ "//y2019/vision/server/www:visualizer_bundle",
],
visibility = ["//visibility:public"],
deps = [
":gen_embedded",
+ ":server_data_proto",
"//aos:init",
"//aos/logging",
"//aos/time",
diff --git a/y2019/vision/server/server.cc b/y2019/vision/server/server.cc
index 235054b..95ecd8d 100644
--- a/y2019/vision/server/server.cc
+++ b/y2019/vision/server/server.cc
@@ -11,11 +11,13 @@
#include "aos/time/time.h"
#include "frc971/control_loops/drivetrain/drivetrain.q.h"
#include "internal/Embedded.h"
+#include "google/protobuf/util/json_util.h"
#include "seasocks/PrintfLogger.h"
#include "seasocks/Server.h"
#include "seasocks/StringUtil.h"
#include "seasocks/WebSocket.h"
#include "y2019/control_loops/drivetrain/camera.q.h"
+#include "y2019/vision/server/server_data.pb.h"
namespace y2019 {
namespace vision {
@@ -146,7 +148,8 @@
std::chrono::nanoseconds(new_frame.timestamp));
latest_frames[new_frame.camera].targets.clear();
if (new_frame.num_targets > 0) {
- last_target_time[new_frame.camera] = now;
+ last_target_time[new_frame.camera] =
+ latest_frames[new_frame.camera].capture_time;
}
for (int target = 0; target < new_frame.num_targets; ++target) {
latest_frames[new_frame.camera].targets.emplace_back();
@@ -159,40 +162,43 @@
// TODO(james): Use protobuf or the such to generate JSON rather than
// doing so manually.
last_send_time = now;
- std::ostringstream stream;
- stream << "{\n";
-
- stream << "\"robot\": {";
- stream << "\"x\": " << drivetrain_status->x << ",";
- stream << "\"y\": " << drivetrain_status->y << ",";
- stream << "\"theta\": " << drivetrain_status->theta;
- stream << "}\n";
- stream << ",\"target\": {";
- stream << "\"x\": " << drivetrain_status->line_follow_logging.x << ",";
- stream << "\"y\": " << drivetrain_status->line_follow_logging.y << ",";
- stream << "\"theta\": " << drivetrain_status->line_follow_logging.theta
- << ",";
- stream << "\"frozen\": " << drivetrain_status->line_follow_logging.frozen
- << ",";
- stream << "\"have_target\": "
- << drivetrain_status->line_follow_logging.have_target;
- stream << "} ";
-
- stream << ", \"last_target_age\": [";
- bool first = true;
- for (const auto t : last_target_time) {
- if (!first) {
- stream << ",";
- }
- first = false;
- stream << ::std::chrono::duration_cast<::std::chrono::duration<double>>(
- now - t).count();
+ DebugData debug_data;
+ debug_data.mutable_robot_pose()->set_x(drivetrain_status->x);
+ debug_data.mutable_robot_pose()->set_y(drivetrain_status->y);
+ debug_data.mutable_robot_pose()->set_theta(drivetrain_status->theta);
+ {
+ LineFollowDebug *line_debug = debug_data.mutable_line_follow_debug();
+ line_debug->set_frozen(drivetrain_status->line_follow_logging.frozen);
+ line_debug->set_have_target(
+ drivetrain_status->line_follow_logging.have_target);
+ line_debug->mutable_goal_target()->set_x(
+ drivetrain_status->line_follow_logging.x);
+ line_debug->mutable_goal_target()->set_y(
+ drivetrain_status->line_follow_logging.y);
+ line_debug->mutable_goal_target()->set_theta(
+ drivetrain_status->line_follow_logging.theta);
}
- stream << "]";
+ for (size_t ii = 0; ii < latest_frames.size(); ++ii) {
+ CameraDebug *camera_debug = debug_data.add_camera_debug();
+ LocalCameraFrame cur_frame = latest_frames[ii];
- stream << "}";
+ camera_debug->set_current_frame_age(
+ ::std::chrono::duration_cast<::std::chrono::duration<double>>(
+ now - cur_frame.capture_time).count());
+ camera_debug->set_time_since_last_target(
+ ::std::chrono::duration_cast<::std::chrono::duration<double>>(
+ now - last_target_time[ii]).count());
+ for (const auto &target : cur_frame.targets) {
+ Pose *pose = camera_debug->add_targets();
+ pose->set_x(target.x);
+ pose->set_y(target.y);
+ pose->set_theta(target.theta);
+ }
+ }
+ ::std::string json;
+ google::protobuf::util::MessageToJsonString(debug_data, &json);
server->execute(
- std::make_shared<UpdateData>(websocket_handler, stream.str()));
+ std::make_shared<UpdateData>(websocket_handler, ::std::move(json)));
}
}
}
diff --git a/y2019/vision/server/server_data.proto b/y2019/vision/server/server_data.proto
new file mode 100644
index 0000000..d3e5665
--- /dev/null
+++ b/y2019/vision/server/server_data.proto
@@ -0,0 +1,48 @@
+syntax = "proto2";
+
+package y2019.vision;
+
+// Various proto definitions for use with the websocket server for debugging the
+// vision code.
+
+// Representation of a 2D pose on the field, generally referenced off of the
+// center of the driver's station wall with positive X out towards the field and
+// positive Y to the left from the driver's perspective. theta is zero when
+// pointed straight along the positive X axis and increases towards positive Y
+// (counter-clockwise).
+message Pose {
+ optional float x = 1; // meters
+ optional float y = 2; // meters
+ optional float theta = 3; // radians
+}
+
+// Debugging information for the line following.
+message LineFollowDebug {
+ // Pose of the target that we are currently latched onto.
+ optional Pose goal_target = 1;
+ // Whether we are currently in line following mode and so will freeze the
+ // target shortly.
+ optional bool frozen = 2;
+ // Whether we have chosen a target (otherwise, goal_target may be arbitrary).
+ optional bool have_target = 3;
+}
+
+// Data for a single camera at a given instance in time (corresponding to a
+// camera frame).
+message CameraDebug {
+ // The time that has passed, in seconds, since we last got a frame with a
+ // target in it.
+ optional float time_since_last_target = 1;
+  // Age, in seconds, of the latest frame, whose targets are listed below.
+ optional float current_frame_age = 2;
+ // Target Pose is relative to the camera, *not* the field, so (0, 0) is at the
+ // camera.
+ repeated Pose targets = 3;
+}
+
+// The overall package of data that we send to the webpage.
+message DebugData {
+ optional Pose robot_pose = 1;
+ optional LineFollowDebug line_follow_debug = 2;
+ repeated CameraDebug camera_debug = 3;
+}
diff --git a/y2019/vision/server/www/generate_camera.cc b/y2019/vision/server/www/generate_camera.cc
index 8e516c3..757ce69 100644
--- a/y2019/vision/server/www/generate_camera.cc
+++ b/y2019/vision/server/www/generate_camera.cc
@@ -15,6 +15,8 @@
out_file << "export const CAMERA_POSES = [\n";
for (size_t ii = 0; ii < constants::Values::kNumCameras; ++ii) {
out_file << " ";
+ // TODO(james): Decide how to manage visualization for practice and code
+ // bots.
DumpPose(&out_file,
GetCamera(CameraSerialNumbers(CompBotTeensyId())[ii])->geometry);
out_file << ",\n";
diff --git a/y2019/vision/server/www/main.ts b/y2019/vision/server/www/main.ts
index a9a57bd..beb2734 100644
--- a/y2019/vision/server/www/main.ts
+++ b/y2019/vision/server/www/main.ts
@@ -28,18 +28,18 @@
reader.addEventListener('loadend', (e) => {
const text = e.srcElement.result;
const j = JSON.parse(text);
- this.x = j.robot.x;
- this.y = j.robot.y;
- this.theta = j.robot.theta;
+ this.x = j.robotPose.x;
+ this.y = j.robotPose.y;
+ this.theta = j.robotPose.theta;
- if(j.target) {
- this.targetLocked = j.target.frozen && j.target.have_target;
- this.targetX = j.target.x;
- this.targetY = j.target.y;
- this.targetTheta = j.target.theta;
+ if(j.lineFollowDebug) {
+ this.targetLocked = j.lineFollowDebug.frozen && j.lineFollowDebug.haveTarget;
+ this.targetX = j.lineFollowDebug.goalTarget.x;
+ this.targetY = j.lineFollowDebug.goalTarget.y;
+ this.targetTheta = j.lineFollowDebug.goalTarget.theta;
}
for (let ii of [0, 1, 2, 3, 4]) {
- if (j.last_target_age[ii] > 0.25) {
+ if (j.cameraDebug[ii].timeSinceLastTarget > 0.25) {
this.cameraColors[ii] = 'red';
} else {
this.cameraColors[ii] = 'green';