Merge "Added instructions for getting setup with the LDAP"
diff --git a/aos/BUILD b/aos/BUILD
index 2bb1d50..a7d691b 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -281,6 +281,7 @@
flatbuffer_cc_library(
name = "configuration_fbs",
srcs = ["configuration.fbs"],
+ gen_reflections = 1,
visibility = ["//visibility:public"],
)
@@ -335,10 +336,17 @@
visibility = ["//visibility:public"],
)
+flatbuffer_ts_library(
+ name = "json_to_flatbuffer_flatbuffer_ts",
+ srcs = ["json_to_flatbuffer.fbs"],
+ visibility = ["//aos:__subpackages__"],
+)
+
flatbuffer_cc_library(
name = "json_to_flatbuffer_flatbuffer",
srcs = ["json_to_flatbuffer.fbs"],
gen_reflections = 1,
+ visibility = ["//aos:__subpackages__"],
)
cc_library(
diff --git a/aos/events/BUILD b/aos/events/BUILD
index 0212eaa..01e256c 100644
--- a/aos/events/BUILD
+++ b/aos/events/BUILD
@@ -312,6 +312,16 @@
],
)
+cc_test(
+ name = "event_scheduler_test",
+ srcs = ["event_scheduler_test.cc"],
+ deps = [
+ ":simulated_event_loop",
+ "//aos/testing:googletest",
+ "@com_github_google_glog//:glog",
+ ],
+)
+
cc_library(
name = "aos_logging",
srcs = [
diff --git a/aos/events/event_scheduler.cc b/aos/events/event_scheduler.cc
index f985eb1..202c772 100644
--- a/aos/events/event_scheduler.cc
+++ b/aos/events/event_scheduler.cc
@@ -44,11 +44,13 @@
void EventScheduler::CallOldestEvent() {
CHECK_GT(events_list_.size(), 0u);
auto iter = events_list_.begin();
- now_ = iter->first;
+ monotonic_now_ = iter->first;
+ monotonic_now_valid_ = true;
::std::function<void()> callback = ::std::move(iter->second);
events_list_.erase(iter);
callback();
+ monotonic_now_valid_ = false;
}
void EventScheduler::RunOnRun() {
diff --git a/aos/events/event_scheduler.h b/aos/events/event_scheduler.h
index e6c732b..468d904 100644
--- a/aos/events/event_scheduler.h
+++ b/aos/events/event_scheduler.h
@@ -81,16 +81,20 @@
// measurement.
distributed_clock::time_point ToDistributedClock(
monotonic_clock::time_point time) const {
- return distributed_clock::epoch() + time.time_since_epoch() +
- monotonic_offset_;
+ return distributed_clock::epoch() +
+ std::chrono::duration_cast<std::chrono::nanoseconds>(
+ (time.time_since_epoch() - distributed_offset_) /
+ distributed_slope_);
}
// Takes the distributed time and converts it to the monotonic clock for this
// node.
monotonic_clock::time_point FromDistributedClock(
distributed_clock::time_point time) const {
- return monotonic_clock::epoch() + time.time_since_epoch() -
- monotonic_offset_;
+ return monotonic_clock::epoch() +
+ std::chrono::duration_cast<std::chrono::nanoseconds>(
+ time.time_since_epoch() * distributed_slope_) +
+ distributed_offset_;
}
// Returns the current monotonic time on this node calculated from the
@@ -98,24 +102,31 @@
inline monotonic_clock::time_point monotonic_now() const;
// Sets the offset between the distributed and monotonic clock.
- // distributed = monotonic + offset;
- void SetDistributedOffset(std::chrono::nanoseconds monotonic_offset) {
- monotonic_offset_ = monotonic_offset;
- }
+ // monotonic = distributed * slope + offset;
+ void SetDistributedOffset(std::chrono::nanoseconds distributed_offset,
+ double distributed_slope) {
+ // TODO(austin): Use a starting point to improve precision.
+ // TODO(austin): Make slope be the slope of the offset, not the input,
+    // through the calculation process.
+ distributed_offset_ = distributed_offset;
+ distributed_slope_ = distributed_slope;
- // Returns the offset used to convert to and from the distributed clock.
- std::chrono::nanoseconds monotonic_offset() const {
- return monotonic_offset_;
+ // Once we update the offset, now isn't going to be valid anymore.
+ // TODO(austin): Probably should instead use the piecewise linear function
+ // and evaluate it correctly.
+ monotonic_now_valid_ = false;
}
private:
friend class EventSchedulerScheduler;
// Current execution time.
- monotonic_clock::time_point now_ = monotonic_clock::epoch();
+ bool monotonic_now_valid_ = false;
+ monotonic_clock::time_point monotonic_now_ = monotonic_clock::epoch();
// Offset to the distributed clock.
// distributed = monotonic + offset;
- std::chrono::nanoseconds monotonic_offset_ = std::chrono::seconds(0);
+ std::chrono::nanoseconds distributed_offset_ = std::chrono::seconds(0);
+ double distributed_slope_ = 1.0;
// List of functions to run (once) when running.
std::vector<std::function<void()>> on_run_;
@@ -184,8 +195,14 @@
inline monotonic_clock::time_point EventScheduler::monotonic_now() const {
// Make sure we stay in sync.
- CHECK_EQ(now_, FromDistributedClock(scheduler_scheduler_->distributed_now()));
- return now_;
+ if (monotonic_now_valid_) {
+ CHECK_NEAR(monotonic_now_,
+ FromDistributedClock(scheduler_scheduler_->distributed_now()),
+ std::chrono::nanoseconds(1));
+ return monotonic_now_;
+ } else {
+ return FromDistributedClock(scheduler_scheduler_->distributed_now());
+ }
}
inline bool EventScheduler::is_running() const {
diff --git a/aos/events/event_scheduler_test.cc b/aos/events/event_scheduler_test.cc
new file mode 100644
index 0000000..e2523db
--- /dev/null
+++ b/aos/events/event_scheduler_test.cc
@@ -0,0 +1,50 @@
+#include "aos/events/event_scheduler.h"
+
+#include <chrono>
+
+#include "gtest/gtest.h"
+
+namespace aos {
+
+namespace chrono = std::chrono;
+
+// Tests that the default parameters (slope of 1, offset of 0) behave as
+// an identity.
+TEST(EventSchedulerTest, IdentityTimeConversion) {
+ EventScheduler s;
+ EXPECT_EQ(s.FromDistributedClock(distributed_clock::epoch()),
+ monotonic_clock::epoch());
+
+ EXPECT_EQ(
+ s.FromDistributedClock(distributed_clock::epoch() + chrono::seconds(1)),
+ monotonic_clock::epoch() + chrono::seconds(1));
+
+ EXPECT_EQ(s.ToDistributedClock(monotonic_clock::epoch()),
+ distributed_clock::epoch());
+
+ EXPECT_EQ(
+ s.ToDistributedClock(monotonic_clock::epoch() + chrono::seconds(1)),
+ distributed_clock::epoch() + chrono::seconds(1));
+}
+
+// Tests that a non-unity slope is computed correctly.
+TEST(EventSchedulerTest, DoubleTimeConversion) {
+ EventScheduler s;
+ s.SetDistributedOffset(std::chrono::seconds(7), 2.0);
+
+ EXPECT_EQ(s.FromDistributedClock(distributed_clock::epoch()),
+ monotonic_clock::epoch() + chrono::seconds(7));
+
+ EXPECT_EQ(
+ s.FromDistributedClock(distributed_clock::epoch() + chrono::seconds(1)),
+ monotonic_clock::epoch() + chrono::seconds(9));
+
+ EXPECT_EQ(s.ToDistributedClock(monotonic_clock::epoch() + chrono::seconds(7)),
+ distributed_clock::epoch());
+
+ EXPECT_EQ(
+ s.ToDistributedClock(monotonic_clock::epoch() + chrono::seconds(9)),
+ distributed_clock::epoch() + chrono::seconds(1));
+}
+
+} // namespace aos
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index d630e98..eff977b 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -614,7 +614,8 @@
size_t node_index = 0;
for (std::unique_ptr<State> &state : states_) {
- state->node_event_loop_factory->SetDistributedOffset(offset(node_index));
+ state->node_event_loop_factory->SetDistributedOffset(-offset(node_index),
+ 1.0);
++node_index;
}
}
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 7843647..b6a30cb 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -667,7 +667,7 @@
const chrono::nanoseconds initial_pi2_offset = -chrono::seconds(1000);
chrono::nanoseconds pi2_offset = initial_pi2_offset;
- pi2->SetDistributedOffset(pi2_offset);
+ pi2->SetDistributedOffset(-pi2_offset, 1.0);
LOG(INFO) << "pi2 times: " << pi2->monotonic_now() << " "
<< pi2->realtime_now() << " distributed "
<< pi2->ToDistributedClock(pi2->monotonic_now());
@@ -675,7 +675,7 @@
for (int i = 0; i < 95; ++i) {
pi2_offset += chrono::nanoseconds(200);
- pi2->SetDistributedOffset(pi2_offset);
+ pi2->SetDistributedOffset(-pi2_offset, 1.0);
event_loop_factory_.RunFor(chrono::milliseconds(1));
}
@@ -692,7 +692,7 @@
for (int i = 0; i < 20000; ++i) {
pi2_offset += chrono::nanoseconds(200);
- pi2->SetDistributedOffset(pi2_offset);
+ pi2->SetDistributedOffset(-pi2_offset, 1.0);
event_loop_factory_.RunFor(chrono::milliseconds(1));
}
@@ -702,7 +702,7 @@
for (int i = 0; i < 40000; ++i) {
pi2_offset -= chrono::nanoseconds(200);
- pi2->SetDistributedOffset(pi2_offset);
+ pi2->SetDistributedOffset(-pi2_offset, 1.0);
event_loop_factory_.RunFor(chrono::milliseconds(1));
}
}
diff --git a/aos/events/simulated_event_loop.h b/aos/events/simulated_event_loop.h
index c8029de..de19b11 100644
--- a/aos/events/simulated_event_loop.h
+++ b/aos/events/simulated_event_loop.h
@@ -158,11 +158,14 @@
// measurement.
inline distributed_clock::time_point ToDistributedClock(
monotonic_clock::time_point time) const;
+ inline monotonic_clock::time_point FromDistributedClock(
+ distributed_clock::time_point time) const;
// Sets the offset between the monotonic clock and the central distributed
// clock. distributed_clock = monotonic_clock + offset.
- void SetDistributedOffset(std::chrono::nanoseconds monotonic_offset) {
- scheduler_.SetDistributedOffset(monotonic_offset);
+ void SetDistributedOffset(std::chrono::nanoseconds monotonic_offset,
+ double monotonic_slope) {
+ scheduler_.SetDistributedOffset(monotonic_offset, monotonic_slope);
}
private:
@@ -192,7 +195,7 @@
inline monotonic_clock::time_point NodeEventLoopFactory::monotonic_now() const {
// TODO(austin): Confirm that time never goes backwards?
- return scheduler_.FromDistributedClock(factory_->distributed_now());
+ return scheduler_.monotonic_now();
}
inline realtime_clock::time_point NodeEventLoopFactory::realtime_now() const {
@@ -200,6 +203,11 @@
realtime_offset_);
}
+inline monotonic_clock::time_point NodeEventLoopFactory::FromDistributedClock(
+ distributed_clock::time_point time) const {
+ return scheduler_.FromDistributedClock(time);
+}
+
inline distributed_clock::time_point NodeEventLoopFactory::ToDistributedClock(
monotonic_clock::time_point time) const {
return scheduler_.ToDistributedClock(time);
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index 67e54f8..33291b2 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -1,5 +1,6 @@
load("@build_bazel_rules_typescript//:defs.bzl", "ts_library")
-load("@build_bazel_rules_nodejs//:defs.bzl", "rollup_bundle")
+load("@build_bazel_rules_nodejs//:defs.bzl", "rollup_bundle", "nodejs_binary")
+load("//aos:config.bzl", "aos_config")
filegroup(
name = "files",
@@ -56,3 +57,57 @@
cmd = "cp $(location @com_github_google_flatbuffers//:flatjs) $@",
visibility = ["//aos:__subpackages__"],
)
+
+ts_library(
+ name = "reflection_test_main",
+ srcs = [
+ "reflection_test_main.ts",
+ ],
+ deps = [
+ ":reflection_ts",
+ "//aos/network/www:proxy",
+ ],
+)
+
+ts_library(
+ name = "reflection_ts",
+ srcs = ["reflection.ts"],
+ deps =
+ [
+ "//aos:configuration_ts_fbs",
+ "//aos:json_to_flatbuffer_flatbuffer_ts",
+ ],
+)
+
+aos_config(
+ name = "test_config",
+ src = "test_config_file.json",
+ flatbuffers = [
+ "//aos:configuration_fbs",
+ "//aos:json_to_flatbuffer_flatbuffer",
+ ],
+ deps = [
+ "//aos/events:config",
+ ],
+)
+
+rollup_bundle(
+ name = "reflection_test_bundle",
+ entry_point = "aos/network/www/reflection_test_main",
+ deps = [
+ ":reflection_test_main",
+ ],
+)
+
+sh_binary(
+ name = "web_proxy_demo",
+ srcs = ["web_proxy_demo.sh"],
+ data = [
+ ":flatbuffers",
+ ":reflection_test.html",
+ ":reflection_test_bundle",
+ ":test_config.json",
+ "//aos/network:web_proxy_main",
+ "//y2020:config.json",
+ ],
+)
diff --git a/aos/network/www/reflection.ts b/aos/network/www/reflection.ts
new file mode 100644
index 0000000..5141790
--- /dev/null
+++ b/aos/network/www/reflection.ts
@@ -0,0 +1,417 @@
+// This library provides a few basic reflection utilities for Flatbuffers.
+// Currently, this only really supports the level of reflection that would
+// be necessary to convert a serialized flatbuffer to JSON using just a
+// reflection.Schema flatbuffer describing the type.
+// The current implementation is also not necessarily robust to invalidly
+// constructed flatbuffers.
+// See reflection_test_main.ts for sample usage.
+
+import {BaseType, Schema, Object, Field} from 'aos/configuration_generated';
+
+// Returns the size, in bytes, of the given type. For vectors/strings/etc.
+// returns the size of the offset.
+function typeSize(baseType: BaseType): number {
+ switch (baseType) {
+ case BaseType.None:
+ case BaseType.UType:
+ case BaseType.Bool:
+ case BaseType.Byte:
+ case BaseType.UByte:
+ return 1;
+ case BaseType.Short:
+ case BaseType.UShort:
+ return 2;
+ case BaseType.Int:
+ case BaseType.UInt:
+ return 4;
+ case BaseType.Long:
+ case BaseType.ULong:
+ return 8;
+ case BaseType.Float:
+ return 4;
+ case BaseType.Double:
+ return 8;
+ case BaseType.String:
+ case BaseType.Vector:
+ case BaseType.Obj:
+ case BaseType.Union:
+ case BaseType.Array:
+ return 4;
+ }
+}
+
+// Returns whether the given type is a scalar type.
+function isScalar(baseType: BaseType): boolean {
+ switch (baseType) {
+ case BaseType.UType:
+ case BaseType.Bool:
+ case BaseType.Byte:
+ case BaseType.UByte:
+ case BaseType.Short:
+ case BaseType.UShort:
+ case BaseType.Int:
+ case BaseType.UInt:
+ case BaseType.Long:
+ case BaseType.ULong:
+ case BaseType.Float:
+ case BaseType.Double:
+ return true;
+ case BaseType.None:
+ case BaseType.String:
+ case BaseType.Vector:
+ case BaseType.Obj:
+ case BaseType.Union:
+ case BaseType.Array:
+ return false;
+ }
+}
+
+// Returns whether the given type is integer or not.
+function isInteger(baseType: BaseType): boolean {
+ switch (baseType) {
+ case BaseType.UType:
+ case BaseType.Bool:
+ case BaseType.Byte:
+ case BaseType.UByte:
+ case BaseType.Short:
+ case BaseType.UShort:
+ case BaseType.Int:
+ case BaseType.UInt:
+ case BaseType.Long:
+ case BaseType.ULong:
+ return true;
+ case BaseType.Float:
+ case BaseType.Double:
+ case BaseType.None:
+ case BaseType.String:
+ case BaseType.Vector:
+ case BaseType.Obj:
+ case BaseType.Union:
+ case BaseType.Array:
+ return false;
+ }
+}
+
+// Returns whether the given type is a long--this is needed to know whether it
+// can be represented by the normal javascript number (8-byte integers require a
+// special type, since the native number type is an 8-byte double, which won't
+// represent 8-byte integers to full precision).
+function isLong(baseType: BaseType): boolean {
+ return isInteger(baseType) && (typeSize(baseType) > 4);
+}
+
+// TODO(james): Use the actual flatbuffers.ByteBuffer object; this is just
+// to prevent the typescript compiler from complaining.
+class ByteBuffer {}
+
+// Stores the data associated with a Table within a given buffer.
+export class Table {
+ // Wrapper to represent an object (Table or Struct) within a ByteBuffer.
+ // The ByteBuffer is the raw data associated with the object.
+ // typeIndex is an index into the schema object vector for the parser
+ // object that this is associated with.
+ // offset is the absolute location within the buffer where the root of the
+ // object is.
+ // Note that a given Table assumes that it is being used with a particular
+ // Schema object.
+ // External users should generally not be using this constructor directly.
+ constructor(
+ public readonly bb: ByteBuffer,
+ public readonly typeIndex: number, public readonly offset: number) {}
+ // Constructs a Table object for the root of a ByteBuffer--this assumes that
+ // the type of the Table is the root table of the Parser that you are using.
+ static getRootTable(bb: ByteBuffer): Table {
+ return new Table(bb, -1, bb.readInt32(bb.position()) + bb.position());
+ }
+ // Reads a scalar of a given type at a given offset.
+ readScalar(fieldType: BaseType, offset: number) {
+ switch (fieldType) {
+ case BaseType.UType:
+ case BaseType.Bool:
+ return this.bb.readUint8(offset);
+ case BaseType.Byte:
+ return this.bb.readInt8(offset);
+ case BaseType.UByte:
+ return this.bb.readUint8(offset);
+ case BaseType.Short:
+ return this.bb.readInt16(offset);
+ case BaseType.UShort:
+ return this.bb.readUint16(offset);
+ case BaseType.Int:
+ return this.bb.readInt32(offset);
+ case BaseType.UInt:
+ return this.bb.readUint32(offset);
+ case BaseType.Long:
+ return this.bb.readInt64(offset);
+ case BaseType.ULong:
+ return this.bb.readUint64(offset);
+ case BaseType.Float:
+ return this.bb.readFloat32(offset);
+ case BaseType.Double:
+ return this.bb.readFloat64(offset);
+ }
+    throw new Error('Unsupported message type ' + fieldType);
+ }
+};
+
+// The Parser class uses a Schema to provide all the utilities required to
+// parse flatbuffers that have a type that is the same as the root_type defined
+// by the Schema.
+// The classical usage would be to, e.g., be reading a channel with a type of
+// "aos.FooBar". At startup, you would construct a Parser from the channel's
+// Schema. When a message is received on the channel, you would then use
+// Table.getRootTable() on the received buffer to construct the Table, and
+// then access the members using the various methods of the Parser (or just
+// convert the entire object to a javascript Object/JSON using toObject()).
+export class Parser {
+ constructor(private readonly schema: Schema) {}
+
+  // Parse a Table to a javascript object. This can be used, e.g., to convert
+ // a flatbuffer Table to JSON.
+ // If readDefaults is set to true, then scalar fields will be filled out with
+ // their default values if not populated; if readDefaults is false and the
+ // field is not populated, the resulting object will not populate the field.
+ toObject(table: Table, readDefaults: boolean = false) {
+ const result = {};
+ const schema = this.getType(table.typeIndex);
+ const numFields = schema.fieldsLength();
+ for (let ii = 0; ii < numFields; ++ii) {
+ const field = schema.fields(ii);
+ const baseType = field.type().baseType();
+ let fieldValue = null;
+ if (isScalar(baseType)) {
+ fieldValue = this.readScalar(table, field.name(), readDefaults);
+ } else if (baseType === BaseType.String) {
+ fieldValue = this.readString(table, field.name());
+ } else if (baseType === BaseType.Obj) {
+ const subTable = this.readTable(table, field.name());
+ if (subTable !== null) {
+ fieldValue = this.toObject(subTable, readDefaults);
+ }
+ } else if (baseType === BaseType.Vector) {
+ const elementType = field.type().element();
+ if (isScalar(elementType)) {
+ fieldValue = this.readVectorOfScalars(table, field.name());
+ } else if (elementType === BaseType.String) {
+ fieldValue = this.readVectorOfStrings(table, field.name());
+ } else if (elementType === BaseType.Obj) {
+ const tables = this.readVectorOfTables(table, field.name());
+ if (tables !== null) {
+ fieldValue = [];
+ for (const table of tables) {
+ fieldValue.push(this.toObject(table, readDefaults));
+ }
+ }
+ } else {
+ throw new Error('Vectors of Unions and Arrays are not supported.');
+ }
+ } else {
+ throw new Error(
+ 'Unions and Arrays are not supported in field ' + field.name());
+ }
+ if (fieldValue !== null) {
+ result[field.name()] = fieldValue;
+ }
+ }
+ return result;
+ }
+
+ // Returns the Object definition associated with the given type index.
+ getType(typeIndex: number): Object {
+ if (typeIndex === -1) {
+ return this.schema.rootTable();
+ }
+ if (typeIndex < 0 || typeIndex > this.schema.objectsLength()) {
+ throw new Error("Type index out-of-range.");
+ }
+ return this.schema.objects(typeIndex);
+ }
+
+ // Retrieves the Field schema for the given field name within a given
+ // type index.
+ getField(fieldName: string, typeIndex: number): Field {
+ const schema: Object = this.getType(typeIndex);
+ const numFields = schema.fieldsLength();
+ for (let ii = 0; ii < numFields; ++ii) {
+ const field = schema.fields(ii);
+ const name = field.name();
+ if (fieldName === name) {
+ return field;
+ }
+ }
+ throw new Error(
+        'Couldn\'t find field ' + fieldName + ' in type index ' + typeIndex);
+ }
+
+ // Reads a scalar with the given field name from a Table. If readDefaults
+ // is set to false and the field is unset, we will return null. If
+ // readDefaults is true and the field is unset, we will look-up the default
+ // value for the field and return that.
+ // For 64-bit fields, returns a flatbuffer Long rather than a standard number.
+ // TODO(james): For this and other accessors, determine if there is a
+ // significant performance gain to be had by using readScalar to construct
+ // an accessor method rather than having to redo the schema inspection on
+ // every call.
+ readScalar(table: Table, fieldName: string, readDefaults: boolean = false) {
+ const field = this.getField(fieldName, table.typeIndex);
+ const fieldType = field.type();
+ const isStruct = this.getType(table.typeIndex).isStruct();
+ if (!isScalar(fieldType.baseType())) {
+ throw new Error('Field ' + fieldName + ' is not a scalar type.');
+ }
+
+ if (isStruct) {
+ return table.readScalar(
+ fieldType.baseType(), table.offset + field.offset());
+ }
+
+ const offset =
+ table.offset + table.bb.__offset(table.offset, field.offset());
+ if (offset === table.offset) {
+ if (!readDefaults) {
+ return null;
+ }
+ if (isInteger(fieldType.baseType())) {
+ if (isLong(fieldType.baseType())) {
+ return field.defaultInteger();
+ } else {
+ if (field.defaultInteger().high != 0) {
+ throw new Error(
+ '<=4 byte integer types should not use 64-bit default values.');
+ }
+ return field.defaultInteger().low;
+ }
+ } else {
+ return field.defaultReal();
+ }
+ }
+ return table.readScalar(fieldType.baseType(), offset);
+ }
+ // Reads a string with the given field name from the provided Table.
+ // If the field is unset, returns null.
+ readString(table: Table, fieldName: string): string|null {
+ const field = this.getField(fieldName, table.typeIndex);
+ const fieldType = field.type();
+ if (fieldType.baseType() !== BaseType.String) {
+ throw new Error('Field ' + fieldName + ' is not a string.');
+ }
+
+ const offsetToOffset =
+ table.offset + table.bb.__offset(table.offset, field.offset());
+ if (offsetToOffset === table.offset) {
+ return null;
+ }
+ return table.bb.__string(offsetToOffset);
+ }
+ // Reads a sub-message from the given Table. The sub-message may either be
+ // a struct or a Table. Returns null if the sub-message is not set.
+ readTable(table: Table, fieldName: string): Table|null {
+ const field = this.getField(fieldName, table.typeIndex);
+ const fieldType = field.type();
+ const parentIsStruct = this.getType(table.typeIndex).isStruct();
+ if (fieldType.baseType() !== BaseType.Obj) {
+ throw new Error('Field ' + fieldName + ' is not an object type.');
+ }
+
+ if (parentIsStruct) {
+ return new Table(
+ table.bb, fieldType.index(), table.offset + field.offset());
+ }
+
+ const offsetToOffset =
+ table.offset + table.bb.__offset(table.offset, field.offset());
+ if (offsetToOffset === table.offset) {
+ return null;
+ }
+
+ const elementIsStruct = this.getType(fieldType.index()).isStruct();
+
+ const objectStart =
+ elementIsStruct ? offsetToOffset : table.bb.__indirect(offsetToOffset);
+ return new Table(table.bb, fieldType.index(), objectStart);
+ }
+ // Reads a vector of scalars (like readScalar, may return a vector of Long's
+ // instead). Also, will return null if the vector is not set.
+ readVectorOfScalars(table: Table, fieldName: string): number[]|null {
+ const field = this.getField(fieldName, table.typeIndex);
+ const fieldType = field.type();
+ if (fieldType.baseType() !== BaseType.Vector) {
+ throw new Error('Field ' + fieldName + ' is not an vector.');
+ }
+ if (!isScalar(fieldType.element())) {
+ throw new Error('Field ' + fieldName + ' is not an vector of scalars.');
+ }
+
+ const offsetToOffset =
+ table.offset + table.bb.__offset(table.offset, field.offset());
+ if (offsetToOffset === table.offset) {
+ return null;
+ }
+ const numElements = table.bb.__vector_len(offsetToOffset);
+ const result = [];
+ const baseOffset = table.bb.__vector(offsetToOffset);
+ const scalarSize = typeSize(fieldType.element());
+ for (let ii = 0; ii < numElements; ++ii) {
+ result.push(
+ table.readScalar(fieldType.element(), baseOffset + scalarSize * ii));
+ }
+ return result;
+ }
+ // Reads a vector of tables. Returns null if vector is not set.
+ readVectorOfTables(table: Table, fieldName: string) {
+ const field = this.getField(fieldName, table.typeIndex);
+ const fieldType = field.type();
+ if (fieldType.baseType() !== BaseType.Vector) {
+ throw new Error('Field ' + fieldName + ' is not an vector.');
+ }
+ if (fieldType.element() !== BaseType.Obj) {
+ throw new Error('Field ' + fieldName + ' is not an vector of objects.');
+ }
+
+ const offsetToOffset =
+ table.offset + table.bb.__offset(table.offset, field.offset());
+ if (offsetToOffset === table.offset) {
+ return null;
+ }
+ const numElements = table.bb.__vector_len(offsetToOffset);
+ const result = [];
+ const baseOffset = table.bb.__vector(offsetToOffset);
+ const elementSchema = this.getType(fieldType.index());
+ const elementIsStruct = elementSchema.isStruct();
+ const elementSize = elementIsStruct ? elementSchema.bytesize() :
+ typeSize(fieldType.element());
+ for (let ii = 0; ii < numElements; ++ii) {
+ const elementOffset = baseOffset + elementSize * ii;
+ result.push(new Table(
+ table.bb, fieldType.index(),
+ elementIsStruct ? elementOffset :
+ table.bb.__indirect(elementOffset)));
+ }
+ return result;
+ }
+ // Reads a vector of strings. Returns null if not set.
+ readVectorOfStrings(table: Table, fieldName: string): string[]|null {
+ const field = this.getField(fieldName, table.typeIndex);
+ const fieldType = field.type();
+ if (fieldType.baseType() !== BaseType.Vector) {
+ throw new Error('Field ' + fieldName + ' is not an vector.');
+ }
+ if (fieldType.element() !== BaseType.String) {
+ throw new Error('Field ' + fieldName + ' is not an vector of strings.');
+ }
+
+ const offsetToOffset =
+ table.offset + table.bb.__offset(table.offset, field.offset());
+ if (offsetToOffset === table.offset) {
+ return null;
+ }
+ const numElements = table.bb.__vector_len(offsetToOffset);
+ const result = [];
+ const baseOffset = table.bb.__vector(offsetToOffset);
+ const offsetSize = typeSize(fieldType.element());
+ for (let ii = 0; ii < numElements; ++ii) {
+ result.push(table.bb.__string(baseOffset + offsetSize * ii));
+ }
+ return result;
+ }
+}
diff --git a/aos/network/www/reflection_test.html b/aos/network/www/reflection_test.html
new file mode 100644
index 0000000..b9cc6c8
--- /dev/null
+++ b/aos/network/www/reflection_test.html
@@ -0,0 +1,8 @@
+<html>
+ <head>
+ <script src="flatbuffers.js"></script>
+ <script src="reflection_test_bundle.min.js" defer></script>
+ </head>
+ <body>
+ </body>
+</html>
diff --git a/aos/network/www/reflection_test_main.ts b/aos/network/www/reflection_test_main.ts
new file mode 100644
index 0000000..5c6436e
--- /dev/null
+++ b/aos/network/www/reflection_test_main.ts
@@ -0,0 +1,159 @@
+import {Configuration, Schema} from 'aos/configuration_generated'
+import {BaseType,
+ Configuration as TestTable,
+ FooStruct,
+ FooStructNested,
+ Location,
+ Map,
+ VectorOfStrings,
+ VectorOfVectorOfString} from 'aos/json_to_flatbuffer_generated'
+
+import {Connection} from './proxy';
+import {Parser, Table} from './reflection'
+// This file runs a basic test to confirm that the typescript flatbuffer
+// reflection library is working correctly. It currently is not run
+// automatically--to run it, run the web_proxy_demo sh_binary target, open the
+// resulting reflection_test.html webpage, open the console and confirm that
+// "TEST PASSED" has been printed.
+
+const conn = new Connection();
+
+conn.connect();
+
+function assertEqual(a: any, b: any, msg?: string): void {
+ if (a !== b) {
+ throw new Error(a + ' !== ' + b + ': ' + msg);
+ }
+}
+
+// Constructs a flatbuffer and then uses Parser.toObject to parse it and confirm
+// that the start/end results are the same. This is largely meant to ensure
+// that we are exercising most of the logic associated with parsing flatbuffers.
+function DoTest(config: Configuration): void {
+ const builder = new flatbuffers.Builder();
+ {
+ TestTable.startVectorFooStructVector(builder, 3);
+ const fooStruct0 = FooStruct.createFooStruct(builder, 66, 118);
+ const fooStruct1 = FooStruct.createFooStruct(builder, 67, 118);
+ const fooStruct2 = FooStruct.createFooStruct(builder, 68, 118);
+ const vectorFooStruct = builder.endVector();
+ const nameString = builder.createString('nameString');
+ const typeString = builder.createString('typeString');
+ const location0 =
+ Location.createLocation(builder, nameString, typeString, 100, 200);
+ const location1 =
+ Location.createLocation(builder, nameString, typeString, 300, 400);
+ const map = Map.createMap(builder, location0, location1);
+ const mapVector = TestTable.createMapsVector(builder, [map]);
+
+ const strVector =
+ VectorOfStrings.createStrVector(builder, [nameString, typeString]);
+ const vectorOfStrings =
+ VectorOfStrings.createVectorOfStrings(builder, strVector);
+ const vVector =
+ VectorOfVectorOfString.createVVector(builder, [vectorOfStrings]);
+ const vectorOfVectorOfStrings =
+ VectorOfVectorOfString.createVectorOfVectorOfString(builder, vVector);
+
+ const doubleVector =
+ TestTable.createVectorFooDoubleVector(builder, [9.71, 1.678, 2.056]);
+
+ TestTable.startConfiguration(builder);
+ TestTable.addMaps(builder, mapVector);
+ TestTable.addVov(builder, vectorOfVectorOfStrings);
+ const fooStruct = FooStruct.createFooStruct(builder, 33, 118);
+ TestTable.addFooStruct(builder, fooStruct);
+ TestTable.addVectorFooStruct(builder, vectorFooStruct);
+ TestTable.addVectorFooDouble(builder, doubleVector);
+ TestTable.addFooDouble(builder, 11.14);
+ TestTable.addFooLong(builder, new flatbuffers.Long(100, 1));
+ TestTable.addFooEnum(builder, BaseType.Array);
+ }
+
+ builder.finish(Configuration.endConfiguration(builder));
+ const array = builder.asUint8Array();
+ const fbBuffer = new flatbuffers.ByteBuffer(array);
+
+ const parsedFb = TestTable.getRootAsConfiguration(fbBuffer);
+
+ let testSchema = null;
+ for (let ii = 0; ii < config.channelsLength(); ++ii) {
+ if (config.channels(ii).type() === 'aos.testing.Configuration') {
+ testSchema = config.channels(ii).schema();
+ }
+ }
+ if (testSchema === null) {
+ throw new Error('Couldn\'t find test schema in config.');
+ }
+ const testParser = new Parser(testSchema);
+ const testTable = Table.getRootTable(fbBuffer);
+ const testObject = testParser.toObject(testTable, false);
+
+ console.log('Parsed test object:');
+ console.log(testObject);
+
+ assertEqual(11.14, parsedFb.fooDouble());
+ assertEqual(testObject['foo_double'], parsedFb.fooDouble());
+ assertEqual(testObject['foo_enum'], parsedFb.fooEnum());
+ assertEqual(testObject['foo_long'].low, parsedFb.fooLong().low);
+ assertEqual(testObject['foo_long'].high, parsedFb.fooLong().high);
+ assertEqual(testObject['foo_ulong'], undefined);
+ assertEqual(testObject['locations'], undefined);
+
+ const maps = testObject['maps'];
+ assertEqual(maps.length, 1);
+ assertEqual(maps[0]['match']['name'], 'nameString');
+ assertEqual(maps[0]['rename']['name'], 'nameString');
+ assertEqual(maps[0]['match']['type'], 'typeString');
+ assertEqual(maps[0]['rename']['type'], 'typeString');
+ assertEqual(
+ maps[0]['match']['frequency'], parsedFb.maps(0).match().frequency());
+ assertEqual(maps[0]['rename']['frequency'], 300);
+ assertEqual(maps[0]['match']['max_size'], 200);
+ assertEqual(maps[0]['rename']['max_size'], 400);
+
+ assertEqual(
+ testObject['foo_struct']['foo_byte'], parsedFb.fooStruct().fooByte());
+ assertEqual(
+ testObject['foo_struct']['nested_struct']['foo_byte'],
+ parsedFb.fooStruct().nestedStruct().fooByte());
+
+ const fooStructs = testObject['vector_foo_struct'];
+ assertEqual(fooStructs.length, 3);
+ for (let ii = 0; ii < 3; ++ii) {
+ assertEqual(
+ fooStructs[ii]['foo_byte'], parsedFb.vectorFooStruct(ii).fooByte());
+ assertEqual(
+ fooStructs[ii]['nested_struct']['foo_byte'],
+ parsedFb.vectorFooStruct(ii).nestedStruct().fooByte());
+ }
+
+ for (let ii = 0; ii < 3; ++ii) {
+ assertEqual(
+ testObject['vector_foo_double'][ii], parsedFb.vectorFooDouble(ii));
+ }
+
+ assertEqual(testObject['vov']['v'].length, 1);
+ assertEqual(testObject['vov']['v'][0]['str'].length, 2);
+ assertEqual(testObject['vov']['v'][0]['str'][0], parsedFb.vov().v(0).str(0));
+ assertEqual(testObject['vov']['v'][0]['str'][1], parsedFb.vov().v(0).str(1));
+ console.log('TEST PASSED');
+}
+
+conn.addConfigHandler((config: Configuration) => {
+ let configSchema = null;
+ for (let ii = 0; ii < config.channelsLength(); ++ii) {
+ if (config.channels(ii).type() === 'aos.Configuration') {
+ configSchema = config.channels(ii).schema();
+ }
+ }
+ if (configSchema === null) {
+ throw new Error('Couldn\'t find Configuration schema in config.');
+ }
+ let configParser = new Parser(configSchema);
+ const configTable = Table.getRootTable(config.bb);
+ console.log('Received config:');
+ console.log(configParser.toObject(configTable, true));
+
+ DoTest(config);
+});
diff --git a/aos/network/www/test_config_file.json b/aos/network/www/test_config_file.json
new file mode 100644
index 0000000..09b22dd
--- /dev/null
+++ b/aos/network/www/test_config_file.json
@@ -0,0 +1,21 @@
+{
+ "channels": [
+ {
+ "name": "/test",
+ "type": "aos.testing.Configuration",
+ "source_node": "roborio",
+ "frequency": 200
+ },
+ {
+ "name": "/test",
+ "type": "aos.Configuration",
+ "source_node": "roborio",
+ "max_size": 1678,
+ "frequency": 200
+ }
+ ],
+ "channel_storage_duration": 31415,
+ "imports": [
+ "../../../aos/events/aos.json"
+ ]
+}
diff --git a/aos/network/www/web_proxy_demo.sh b/aos/network/www/web_proxy_demo.sh
new file mode 100755
index 0000000..c3fe715
--- /dev/null
+++ b/aos/network/www/web_proxy_demo.sh
@@ -0,0 +1 @@
+./aos/network/web_proxy_main --config=aos/network/www/test_config.json --data_dir=aos/network/www
diff --git a/y2020/control_loops/drivetrain/localizer_test.cc b/y2020/control_loops/drivetrain/localizer_test.cc
index 34d772c..a7360c1 100644
--- a/y2020/control_loops/drivetrain/localizer_test.cc
+++ b/y2020/control_loops/drivetrain/localizer_test.cc
@@ -86,7 +86,7 @@
return locations;
}
-constexpr std::chrono::seconds kPiTimeOffset(10);
+constexpr std::chrono::seconds kPiTimeOffset(-10);
} // namespace
namespace chrono = std::chrono;
@@ -129,7 +129,7 @@
drivetrain_plant_(drivetrain_plant_event_loop_.get(), dt_config_),
last_frame_(monotonic_now()) {
event_loop_factory()->GetNodeEventLoopFactory(pi1_)->SetDistributedOffset(
- kPiTimeOffset);
+ kPiTimeOffset, 1.0);
set_team_id(frc971::control_loops::testing::kTeamNumber);
set_battery_voltage(12.0);
@@ -167,7 +167,7 @@
builder.MakeBuilder<aos::message_bridge::ServerConnection>();
connection_builder.add_node(node_offset);
connection_builder.add_monotonic_offset(
- chrono::duration_cast<chrono::nanoseconds>(-kPiTimeOffset)
+ chrono::duration_cast<chrono::nanoseconds>(kPiTimeOffset)
.count());
auto connection_offset = connection_builder.Finish();
auto connections_offset =