Merge changes Id9336d68,I85d0735c
* changes:
Make timestamp_to_csv work with multiple boots
Estimate the distributed clock with boots accounted for
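Both changes move the estimator API from handing back solved times by value to queueing them: QueueNextTimestamp() solves one more point, appends the (distributed time, per-node BootTimestamp) tuple to the converter's internal list, and returns a pointer to it, or nullopt once the log is exhausted. A condensed sketch of the consumer side, mirroring the loop in timestamp_extractor.cc below; the helper name DrainEstimator is illustrative and not part of this change:

```cpp
#include <optional>
#include <tuple>
#include <vector>

#include "aos/network/multinode_timestamp_filter.h"

// Drains the estimator, queueing one solved point per iteration until the log
// runs out.  Mirrors the loop in timestamp_extractor.cc.
void DrainEstimator(
    aos::message_bridge::MultiNodeNoncausalOffsetEstimator *estimator) {
  while (true) {
    std::optional<const std::tuple<aos::distributed_clock::time_point,
                                   std::vector<aos::logger::BootTimestamp>> *>
        next_timestamp = estimator->QueueNextTimestamp();
    if (!next_timestamp) {
      break;
    }
  }
}
```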
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index fead91d..952c16d 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -13,20 +13,30 @@
)
cc_library(
+ name = "boot_timestamp",
+ srcs = ["boot_timestamp.cc"],
+ hdrs = ["boot_timestamp.h"],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//aos/time",
+ ],
+)
+
+cc_library(
name = "logfile_utils",
srcs = [
- "boot_timestamp.cc",
"logfile_sorting.cc",
"logfile_utils.cc",
],
hdrs = [
- "boot_timestamp.h",
"logfile_sorting.h",
"logfile_utils.h",
],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:public"],
deps = [
+ ":boot_timestamp",
":buffer_encoder",
":logger_fbs",
"//aos:uuid",
diff --git a/aos/events/logging/boot_timestamp.cc b/aos/events/logging/boot_timestamp.cc
index c037633..cdd33b6 100644
--- a/aos/events/logging/boot_timestamp.cc
+++ b/aos/events/logging/boot_timestamp.cc
@@ -11,4 +11,10 @@
<< "}";
}
+std::ostream &operator<<(std::ostream &os,
+ const struct BootDuration &duration) {
+ return os << "{.boot=" << duration.boot
+ << ", .duration=" << duration.duration.count() << "ns}";
+}
+
} // namespace aos::logger
diff --git a/aos/events/logging/boot_timestamp.h b/aos/events/logging/boot_timestamp.h
index ea66c8e..64c824a 100644
--- a/aos/events/logging/boot_timestamp.h
+++ b/aos/events/logging/boot_timestamp.h
@@ -7,6 +7,23 @@
namespace aos::logger {
+// Simple class representing a duration in time and a boot it is from. This
+// gives us something to use for storing the time offset when filtering.
+struct BootDuration {
+ // Boot number for this duration.
+ size_t boot = 0u;
+ // Monotonic duration in that boot.
+ monotonic_clock::duration duration{0};
+
+ BootDuration operator+(monotonic_clock::duration d) const {
+ return {boot, duration + d};
+ }
+
+ bool operator==(const BootDuration &m2) const {
+ return boot == m2.boot && duration == m2.duration;
+ }
+};
+
// Simple class representing which boot and what monotonic time in that boot.
// Boots are assumed to be sequential, and the monotonic clock resets on reboot
// for all the compare operations.
@@ -16,13 +33,21 @@
// Monotonic time in that boot.
monotonic_clock::time_point time = monotonic_clock::min_time;
+ monotonic_clock::duration time_since_epoch() const {
+ return time.time_since_epoch();
+ }
+
static constexpr BootTimestamp min_time() {
- return BootTimestamp{.boot = 0u, .time = monotonic_clock::min_time};
+ return BootTimestamp{.boot = std::numeric_limits<size_t>::min(),
+ .time = monotonic_clock::min_time};
}
static constexpr BootTimestamp max_time() {
return BootTimestamp{.boot = std::numeric_limits<size_t>::max(),
.time = monotonic_clock::max_time};
}
+ static constexpr BootTimestamp epoch() {
+ return BootTimestamp{.boot = 0, .time = monotonic_clock::epoch()};
+ }
// Compare operators. These are implemented such that earlier boots always
// compare less than later boots, and the times are only compared in a single
@@ -33,10 +58,21 @@
bool operator>(const BootTimestamp &m2) const;
bool operator==(const BootTimestamp &m2) const;
bool operator!=(const BootTimestamp &m2) const;
+
+ BootTimestamp operator+(monotonic_clock::duration d) const {
+ return {boot, time + d};
+ }
+ BootTimestamp operator-(monotonic_clock::duration d) const {
+ return {boot, time - d};
+ }
+ BootTimestamp operator+(BootDuration d) const {
+ return {boot, time + d.duration};
+ }
};
std::ostream &operator<<(std::ostream &os,
const struct BootTimestamp &timestamp);
+std::ostream &operator<<(std::ostream &os, const struct BootDuration &duration);
inline bool BootTimestamp::operator<(const BootTimestamp &m2) const {
if (boot != m2.boot) {
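For reference, a short sketch of how the ordering and arithmetic defined above behave; it uses only the operators declared in boot_timestamp.h, and the values and helper name are made up for illustration:

```cpp
#include "aos/events/logging/boot_timestamp.h"
#include "glog/logging.h"

namespace chrono = std::chrono;
using aos::logger::BootDuration;
using aos::logger::BootTimestamp;

void BootTimestampExamples() {
  // Earlier boots always compare less than later boots, even when the raw
  // monotonic time in the earlier boot is larger.
  const BootTimestamp late_in_boot0{
      .boot = 0, .time = aos::monotonic_clock::epoch() + chrono::hours(100)};
  const BootTimestamp early_in_boot1{
      .boot = 1, .time = aos::monotonic_clock::epoch() + chrono::seconds(1)};
  CHECK(late_in_boot0 < early_in_boot1);

  // Adding a BootDuration shifts the time and keeps the boot of the
  // left-hand timestamp.
  const BootDuration offset{.boot = 1, .duration = chrono::milliseconds(5)};
  CHECK_EQ(early_in_boot1 + offset,
           (BootTimestamp{.boot = 1,
                          .time = early_in_boot1.time + offset.duration}));
}
```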
diff --git a/aos/events/logging/log_reader.cc b/aos/events/logging/log_reader.cc
index 8a8f5af..b320474 100644
--- a/aos/events/logging/log_reader.cc
+++ b/aos/events/logging/log_reader.cc
@@ -1484,7 +1484,8 @@
// TODO(austin): We probably want to push this down into the timestamp
// mapper directly.
- filter->Pop(event_loop_->node(), event_loop_->monotonic_now());
+ // TODO(austin): This hard-codes the boot to 0. We need to fix that.
+ filter->Pop(event_loop_->node(), {0, event_loop_->monotonic_now()});
}
VLOG(1) << "Popped " << result
<< configuration::CleanedChannelToString(
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index d6ad8fe..195d442 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -1466,8 +1466,8 @@
// TODO(austin): Negate...
const chrono::nanoseconds initial_pi2_offset = chrono::seconds(1000);
- time_converter_.AddMonotonic({monotonic_clock::epoch(),
- monotonic_clock::epoch() + initial_pi2_offset});
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + initial_pi2_offset});
// Wait for 95 ms, (~0.1 seconds - 1/2 of the ping/pong period), and set the
// skew to be 200 uS/s
const chrono::nanoseconds startup_sleep1 = time_converter_.AddMonotonic(
@@ -2169,8 +2169,8 @@
// up a clock difference between 2 nodes and looking at the resulting parts.
TEST_P(MultinodeLoggerTest, LoggerStartTime) {
time_converter_.AddMonotonic(
- {monotonic_clock::epoch(),
- monotonic_clock::epoch() + chrono::seconds(1000)});
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() + chrono::seconds(1000)});
{
LoggerState pi1_logger = MakeLogger(pi1_);
LoggerState pi2_logger = MakeLogger(pi2_);
@@ -2208,8 +2208,8 @@
util::UnlinkRecursive(tmp_dir_ + "/renamefolder");
util::UnlinkRecursive(tmp_dir_ + "/new-good");
time_converter_.AddMonotonic(
- {monotonic_clock::epoch(),
- monotonic_clock::epoch() + chrono::seconds(1000)});
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() + chrono::seconds(1000)});
logfile_base1_ = tmp_dir_ + "/renamefolder/multi_logfile1";
logfile_base2_ = tmp_dir_ + "/renamefolder/multi_logfile2";
logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
@@ -2234,8 +2234,8 @@
// Test that renaming the file base dies.
TEST_P(MultinodeLoggerDeathTest, LoggerRenameFile) {
time_converter_.AddMonotonic(
- {monotonic_clock::epoch(),
- monotonic_clock::epoch() + chrono::seconds(1000)});
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() + chrono::seconds(1000)});
util::UnlinkRecursive(tmp_dir_ + "/renamefile");
logfile_base1_ = tmp_dir_ + "/renamefile/multi_logfile1";
logfile_base2_ = tmp_dir_ + "/renamefile/multi_logfile2";
@@ -2413,8 +2413,8 @@
TEST_P(MultinodeLoggerTest, OneDirectionWithNegativeSlope) {
event_loop_factory_.GetNodeEventLoopFactory(pi1_)->Disconnect(pi2_);
time_converter_.AddMonotonic(
- {monotonic_clock::epoch(),
- monotonic_clock::epoch() + chrono::seconds(1000)});
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() + chrono::seconds(1000)});
time_converter_.AddMonotonic(
{chrono::milliseconds(10000),
@@ -2439,8 +2439,8 @@
TEST_P(MultinodeLoggerTest, OneDirectionWithPositiveSlope) {
event_loop_factory_.GetNodeEventLoopFactory(pi1_)->Disconnect(pi2_);
time_converter_.AddMonotonic(
- {monotonic_clock::epoch(),
- monotonic_clock::epoch() + chrono::seconds(500)});
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() + chrono::seconds(500)});
time_converter_.AddMonotonic(
{chrono::milliseconds(10000),
@@ -2466,8 +2466,8 @@
event_loop_factory_.GetNodeEventLoopFactory(pi1_)->Disconnect(pi2_);
event_loop_factory_.GetNodeEventLoopFactory(pi2_)->Disconnect(pi1_);
time_converter_.AddMonotonic(
- {monotonic_clock::epoch(),
- monotonic_clock::epoch() + chrono::seconds(1000)});
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() + chrono::seconds(1000)});
{
LoggerState pi1_logger = MakeLogger(pi1_);
diff --git a/aos/events/logging/timestamp_extractor.cc b/aos/events/logging/timestamp_extractor.cc
index 232b842..6a2c95a 100644
--- a/aos/events/logging/timestamp_extractor.cc
+++ b/aos/events/logging/timestamp_extractor.cc
@@ -53,13 +53,6 @@
// Confirm that all the parts are from the same boot if there are enough
// parts to not be from the same boot.
if (!filtered_parts.empty()) {
- for (size_t i = 1; i < filtered_parts.size(); ++i) {
- CHECK_EQ(filtered_parts[i].source_boot_uuid,
- filtered_parts[0].source_boot_uuid)
- << ": Found parts from different boots "
- << LogFileVectorToString(log_files);
- }
-
// Filter the parts relevant to each node when building the mapper.
mappers.emplace_back(
std::make_unique<TimestampMapper>(std::move(filtered_parts)));
@@ -126,30 +119,38 @@
// Don't get clever. Use the first time as the start time. Note: this is
// different than how log_cat and others work.
- std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
- next_timestamp = multinode_estimator.NextTimestamp();
+ std::optional<const std::tuple<distributed_clock::time_point,
+ std::vector<BootTimestamp>> *>
+ next_timestamp = multinode_estimator.QueueNextTimestamp();
CHECK(next_timestamp);
LOG(INFO) << "Starting at:";
for (const Node *node : configuration::GetNodes(config)) {
const size_t node_index = configuration::GetNodeIndex(config, node);
LOG(INFO) << " " << node->name()->string_view() << " -> "
- << std::get<1>(*next_timestamp)[node_index];
+ << std::get<1>(*next_timestamp.value())[node_index].time;
}
- multinode_estimator.Start(std::get<1>(*next_timestamp));
+ std::vector<monotonic_clock::time_point> just_monotonic(
+ std::get<1>(*next_timestamp.value()).size());
+ for (size_t i = 0; i < just_monotonic.size(); ++i) {
+ CHECK_EQ(std::get<1>(*next_timestamp.value())[i].boot, 0u);
+ just_monotonic[i] = std::get<1>(*next_timestamp.value())[i].time;
+ }
+ multinode_estimator.Start(just_monotonic);
// As we pull off all the timestamps, the time problem is continually solved,
// filling in the CSV files.
while (true) {
- std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
- next_timestamp = multinode_estimator.NextTimestamp();
+ std::optional<const std::tuple<distributed_clock::time_point,
+ std::vector<BootTimestamp>> *>
+ next_timestamp = multinode_estimator.QueueNextTimestamp();
if (!next_timestamp) {
break;
}
}
+ LOG(INFO) << "Done";
+
return 0;
}
diff --git a/aos/events/logging/timestamp_plot.gnuplot b/aos/events/logging/timestamp_plot.gnuplot
index 94ef7e4..1028948 100755
--- a/aos/events/logging/timestamp_plot.gnuplot
+++ b/aos/events/logging/timestamp_plot.gnuplot
@@ -9,9 +9,7 @@
print "Node1: ", node1
print "Node2: ", node2
-node1_start_time = system("grep " . node1 . " /tmp/timestamp_noncausal_starttime.csv | awk '{print $2}'") + 0
node1_index = int(system("grep -n " . node1 . " /tmp/timestamp_noncausal_starttime.csv | sed 's/:.*//'")) + 1
-node2_start_time = system("grep " . node2 . " /tmp/timestamp_noncausal_starttime.csv | awk '{print $2}'") + 0
node2_index = int(system("grep -n " . node2 . " /tmp/timestamp_noncausal_starttime.csv | sed 's/:.*//'")) + 1
noncausalfile12 = sprintf("/tmp/timestamp_noncausal_%s_%s.csv", node1, node2)
@@ -32,7 +30,7 @@
samplefile21 using 1:(-$2) title 'sample 2-1', \
noncausalfile12 using 1:3 title 'nc 1-2' with lines, \
noncausalfile21 using 1:(-$3) title 'nc 2-1' with lines, \
- offsetfile using ((column(node1_index) - node1_start_time + (column(node2_index) - node2_start_time)) / 2):(column(node2_index) - column(node1_index)) title 'filter 2-1' with linespoints
+ offsetfile using 1:(column(node2_index) - column(node1_index)) title 'filter 2-1' with linespoints
if (ARG3 ne "" ) {
exit
diff --git a/aos/events/simulated_event_loop_test.cc b/aos/events/simulated_event_loop_test.cc
index 4cb51de..83568da 100644
--- a/aos/events/simulated_event_loop_test.cc
+++ b/aos/events/simulated_event_loop_test.cc
@@ -73,10 +73,10 @@
}
INSTANTIATE_TEST_SUITE_P(SimulatedEventLoopCommonTest, AbstractEventLoopTest,
- CommonParameters());
+ CommonParameters());
INSTANTIATE_TEST_SUITE_P(SimulatedEventLoopCommonDeathTest,
- AbstractEventLoopDeathTest, CommonParameters());
+ AbstractEventLoopDeathTest, CommonParameters());
// Parameters to run all the tests with.
struct Param {
@@ -785,8 +785,8 @@
constexpr chrono::milliseconds kOffset{1501};
time.AddNextTimestamp(
distributed_clock::epoch(),
- {monotonic_clock::epoch(), monotonic_clock::epoch() + kOffset,
- monotonic_clock::epoch()});
+ {logger::BootTimestamp::epoch(), logger::BootTimestamp::epoch() + kOffset,
+ logger::BootTimestamp::epoch()});
std::unique_ptr<EventLoop> ping_event_loop =
simulated_event_loop_factory.MakeEventLoop("ping", pi1);
@@ -1293,13 +1293,13 @@
constexpr chrono::milliseconds kOffset{150100};
time.AddNextTimestamp(
distributed_clock::epoch(),
- {monotonic_clock::epoch(), monotonic_clock::epoch() + kOffset,
- monotonic_clock::epoch()});
+ {logger::BootTimestamp::epoch(), logger::BootTimestamp::epoch() + kOffset,
+ logger::BootTimestamp::epoch()});
time.AddNextTimestamp(
distributed_clock::epoch() + chrono::seconds(10),
- {monotonic_clock::epoch() + chrono::milliseconds(9999),
- monotonic_clock::epoch() + kOffset + chrono::seconds(10),
- monotonic_clock::epoch() + chrono::milliseconds(9999)});
+ {logger::BootTimestamp::epoch() + chrono::milliseconds(9999),
+ logger::BootTimestamp::epoch() + kOffset + chrono::seconds(10),
+ logger::BootTimestamp::epoch() + chrono::milliseconds(9999)});
std::unique_ptr<EventLoop> ping_event_loop =
simulated_event_loop_factory.MakeEventLoop("ping", pi1);
diff --git a/aos/network/BUILD b/aos/network/BUILD
index 067ae35..48a71c6 100644
--- a/aos/network/BUILD
+++ b/aos/network/BUILD
@@ -505,6 +505,7 @@
target_compatible_with = ["@platforms//os:linux"],
deps = [
"//aos:configuration",
+ "//aos/events/logging:boot_timestamp",
"//aos/time",
"@com_google_absl//absl/strings",
],
diff --git a/aos/network/multinode_timestamp_filter.cc b/aos/network/multinode_timestamp_filter.cc
index ebb7a19..3b5b997 100644
--- a/aos/network/multinode_timestamp_filter.cc
+++ b/aos/network/multinode_timestamp_filter.cc
@@ -24,6 +24,7 @@
namespace message_bridge {
namespace {
namespace chrono = std::chrono;
+using aos::logger::BootDuration;
using aos::logger::BootTimestamp;
const Eigen::IOFormat kHeavyFormat(Eigen::StreamPrecision, Eigen::DontAlignCols,
@@ -45,8 +46,7 @@
// ms/s. Figure out how to define it. Do this last. This lets us handle
// constraints going away, and constraints close in time.
-bool TimestampProblem::ValidateSolution(
- std::vector<monotonic_clock::time_point> solution) {
+bool TimestampProblem::ValidateSolution(std::vector<BootTimestamp> solution) {
bool success = true;
for (size_t i = 0u; i < filters_.size(); ++i) {
for (const struct FilterPair &filter : filters_[i]) {
@@ -199,7 +199,7 @@
return a.colPivHouseholderQr().solve(b);
}
-std::vector<monotonic_clock::time_point> TimestampProblem::SolveNewton() {
+std::vector<BootTimestamp> TimestampProblem::SolveNewton() {
constexpr int kMaxIterations = 200;
MaybeUpdateNodeMapping();
VLOG(1) << "Solving for node " << solution_node_ << " at "
@@ -255,7 +255,7 @@
std::abs(data(solution_index)) > 1000) {
int64_t dsolution =
static_cast<int64_t>(std::round(data(solution_index)));
- base_clock_[j] += chrono::nanoseconds(dsolution);
+ base_clock_[j].time += chrono::nanoseconds(dsolution);
data(solution_index) -= dsolution;
}
}
@@ -270,16 +270,17 @@
VLOG(1) << "Solving for node " << solution_node_ << " of "
<< base_clock(solution_node_) << " in " << solution_number
<< " cycles";
- std::vector<monotonic_clock::time_point> result(size());
+ std::vector<BootTimestamp> result(size());
for (size_t i = 0; i < size(); ++i) {
if (live(i)) {
- result[i] =
- base_clock(i) + std::chrono::nanoseconds(static_cast<int64_t>(
- std::round(data(NodeToFullSolutionIndex(i)))));
+ result[i].boot = base_clock(i).boot;
+ result[i].time = base_clock(i).time +
+ std::chrono::nanoseconds(static_cast<int64_t>(
+ std::round(data(NodeToFullSolutionIndex(i)))));
VLOG(1) << "live " << result[i] << " "
<< data(NodeToFullSolutionIndex(i));
} else {
- result[i] = monotonic_clock::min_time;
+ result[i] = BootTimestamp::min_time();
VLOG(1) << "dead " << result[i];
}
}
@@ -328,27 +329,36 @@
}
}
+std::optional<const std::tuple<distributed_clock::time_point,
+ std::vector<BootTimestamp>> *>
+InterpolatedTimeConverter::QueueNextTimestamp() {
+ std::optional<
+ std::tuple<distributed_clock::time_point, std::vector<BootTimestamp>>>
+ next_time = NextTimestamp();
+ if (!next_time) {
+ VLOG(1) << "Last timestamp, calling it quits";
+ at_end_ = true;
+ return std::nullopt;
+ }
+
+ VLOG(1) << "Fetched next timestamp while solving: " << std::get<0>(*next_time)
+ << " ->";
+ for (BootTimestamp t : std::get<1>(*next_time)) {
+ VLOG(1) << " " << t;
+ }
+
+ // TODO(austin): Figure out how to communicate the reboot up to the factory.
+ CHECK_EQ(node_count_, std::get<1>(*next_time).size());
+ times_.emplace_back(std::move(*next_time));
+ return &times_.back();
+}
+
void InterpolatedTimeConverter::QueueUntil(
- std::function<
- bool(const std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>> &)>
+ std::function<bool(const std::tuple<distributed_clock::time_point,
+ std::vector<BootTimestamp>> &)>
not_done) {
while (!at_end_ && (times_.empty() || not_done(times_.back()))) {
- std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
- next_time = NextTimestamp();
- if (!next_time) {
- VLOG(1) << "Last timestamp, calling it quits";
- at_end_ = true;
- break;
- }
- VLOG(1) << "Fetched next timestamp while solving: "
- << std::get<0>(*next_time) << " ->";
- for (monotonic_clock::time_point t : std::get<1>(*next_time)) {
- VLOG(1) << " " << t;
- }
- CHECK_EQ(node_count_, std::get<1>(*next_time).size());
- times_.emplace_back(std::move(*next_time));
+ QueueNextTimestamp();
}
CHECK(!times_.empty())
@@ -378,56 +388,10 @@
}
}
-distributed_clock::time_point InterpolatedTimeConverter::ToDistributedClock(
- size_t node_index, monotonic_clock::time_point time) {
- CHECK_LT(node_index, node_count_);
- // If there is only one node, time estimation makes no sense. Just return
- // unity time.
- if (node_count_ == 1u) {
- return distributed_clock::epoch() + time.time_since_epoch();
- }
-
- // Make sure there are enough timestamps in the queue.
- QueueUntil(
- [time, node_index](
- const std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>> &t) {
- return std::get<1>(t)[node_index] < time;
- });
-
- // Before the beginning needs to have 0 slope otherwise time jumps when
- // timestamp 2 happens.
- if (times_.size() == 1u || time < std::get<1>(times_[0])[node_index]) {
- if (time < std::get<1>(times_[0])[node_index]) {
- CHECK(!have_popped_)
- << ": Trying to interpolate time " << time
- << " but we have forgotten the relevant points already.";
- }
- const distributed_clock::time_point result =
- time - std::get<1>(times_[0])[node_index] + std::get<0>(times_[0]);
- VLOG(2) << "ToDistributedClock(" << node_index << ", " << time << ") -> "
- << result;
- return result;
- }
-
- // Now, find the corresponding timestamps. Search from the back since that's
- // where most of the times we care about will be.
- size_t index = times_.size() - 2u;
- while (index > 0u) {
- if (std::get<1>(times_[index])[node_index] <= time) {
- break;
- }
- --index;
- }
-
- // Interpolate with the two of these.
- const distributed_clock::time_point d0 = std::get<0>(times_[index]);
- const distributed_clock::time_point d1 = std::get<0>(times_[index + 1]);
-
- const monotonic_clock::time_point t0 = std::get<1>(times_[index])[node_index];
- const monotonic_clock::time_point t1 =
- std::get<1>(times_[index + 1])[node_index];
-
+distributed_clock::time_point ToDistributedClock(
+ distributed_clock::time_point d0, distributed_clock::time_point d1,
+ monotonic_clock::time_point t0, monotonic_clock::time_point t1,
+ monotonic_clock::time_point time) {
const chrono::nanoseconds dt = (t1 - t0);
CHECK_NE(dt.count(), 0u) << " t0 " << t0 << " t1 " << t1 << " d0 " << d0
@@ -444,9 +408,67 @@
absl::int128((time - t0).count()) * absl::int128((d1 - d0).count());
numerator += numerator > 0 ? absl::int128(dt.count() / 2)
: -absl::int128(dt.count() / 2);
+ return d0 + std::chrono::nanoseconds(
+ static_cast<int64_t>(numerator / absl::int128(dt.count())));
+}
+
+distributed_clock::time_point InterpolatedTimeConverter::ToDistributedClock(
+ size_t node_index, monotonic_clock::time_point time) {
+ CHECK_LT(node_index, node_count_);
+ // If there is only one node, time estimation makes no sense. Just return
+ // unity time.
+ if (node_count_ == 1u) {
+ return distributed_clock::epoch() + time.time_since_epoch();
+ }
+
+ // Make sure there are enough timestamps in the queue.
+ QueueUntil(
+ [time, node_index](const std::tuple<distributed_clock::time_point,
+ std::vector<BootTimestamp>> &t) {
+ return std::get<1>(t)[node_index].time < time;
+ });
+
+ // Before the beginning needs to have 0 slope otherwise time jumps when
+ // timestamp 2 happens.
+ if (times_.size() == 1u || time < std::get<1>(times_[0])[node_index].time) {
+ if (time < std::get<1>(times_[0])[node_index].time) {
+ CHECK(!have_popped_)
+ << ": Trying to interpolate time " << time
+ << " but we have forgotten the relevant points already.";
+ }
+ const distributed_clock::time_point result =
+ time - std::get<1>(times_[0])[node_index].time + std::get<0>(times_[0]);
+ VLOG(2) << "ToDistributedClock(" << node_index << ", " << time << ") -> "
+ << result;
+ return result;
+ }
+
+ // Now, find the corresponding timestamps. Search from the back since that's
+ // where most of the times we care about will be.
+ size_t index = times_.size() - 2u;
+ while (index > 0u) {
+ // TODO(austin): Binary search.
+ if (std::get<1>(times_[index])[node_index].time <= time) {
+ break;
+ }
+ --index;
+ }
+
+ // Interpolate with the two of these.
+ const distributed_clock::time_point d0 = std::get<0>(times_[index]);
+ const distributed_clock::time_point d1 = std::get<0>(times_[index + 1]);
+
+ // TODO(austin): We should extrapolate if the boot changes.
+ CHECK_EQ(std::get<1>(times_[index])[node_index].boot,
+ std::get<1>(times_[index + 1])[node_index].boot);
+ const monotonic_clock::time_point t0 =
+ std::get<1>(times_[index])[node_index].time;
+ const monotonic_clock::time_point t1 =
+ std::get<1>(times_[index + 1])[node_index].time;
+
const distributed_clock::time_point result =
- d0 + std::chrono::nanoseconds(
- static_cast<int64_t>(numerator / absl::int128(dt.count())));
+ message_bridge::ToDistributedClock(d0, d1, t0, t1, time);
+
VLOG(2) << "ToDistributedClock(" << node_index << ", " << time << ") -> "
<< result;
return result;
@@ -464,7 +486,7 @@
// Make sure there are enough timestamps in the queue.
QueueUntil(
[time](const std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>> &t) {
+ std::vector<BootTimestamp>> &t) {
return std::get<0>(t) < time;
});
@@ -475,7 +497,7 @@
<< " but we have forgotten the relevant points already.";
}
monotonic_clock::time_point result =
- time - std::get<0>(times_[0]) + std::get<1>(times_[0])[node_index];
+ time - std::get<0>(times_[0]) + std::get<1>(times_[0])[node_index].time;
VLOG(2) << "FromDistributedClock(" << node_index << ", " << time << ") -> "
<< result;
return result;
@@ -495,9 +517,12 @@
const distributed_clock::time_point d0 = std::get<0>(times_[index]);
const distributed_clock::time_point d1 = std::get<0>(times_[index + 1]);
- const monotonic_clock::time_point t0 = std::get<1>(times_[index])[node_index];
+ CHECK_EQ(std::get<1>(times_[index])[node_index].boot,
+ std::get<1>(times_[index + 1])[node_index].boot);
+ const monotonic_clock::time_point t0 =
+ std::get<1>(times_[index])[node_index].time;
const monotonic_clock::time_point t1 =
- std::get<1>(times_[index + 1])[node_index];
+ std::get<1>(times_[index + 1])[node_index].time;
const chrono::nanoseconds dd = d1 - d0;
@@ -537,7 +562,7 @@
logged_configuration_(logged_configuration),
skip_order_validation_(skip_order_validation) {
filters_per_node_.resize(NodesCount());
- last_monotonics_.resize(NodesCount(), aos::monotonic_clock::epoch());
+ last_monotonics_.resize(NodesCount(), BootTimestamp::epoch());
if (FLAGS_timestamps_to_csv &&
configuration::MultiNode(logged_configuration)) {
fp_ = fopen("/tmp/timestamp_noncausal_offsets.csv", "w");
@@ -546,21 +571,54 @@
fprintf(fp_, ", %s", node->name()->c_str());
}
fprintf(fp_, "\n");
+ filter_fps_.resize(NodesCount());
+ for (auto &filter_fp : filter_fps_) {
+ filter_fp.resize(NodesCount(), nullptr);
+ }
+ sample_fps_.resize(NodesCount());
+ for (auto &sample_fp : sample_fps_) {
+ sample_fp.resize(NodesCount(), nullptr);
+ }
+
+ node_samples_.resize(NodesCount());
+ for (NodeSamples &node_samples : node_samples_) {
+ node_samples.nodes.resize(NodesCount());
+ }
+
+ source_node_index_ = configuration::SourceNodeIndex(logged_configuration);
}
}
MultiNodeNoncausalOffsetEstimator::~MultiNodeNoncausalOffsetEstimator() {
+ FlushAllSamples(true);
if (fp_) {
fclose(fp_);
fp_ = NULL;
}
+ if (filter_fps_.size() != 0) {
+ for (std::vector<FILE *> &filter_fp : filter_fps_) {
+ for (FILE *&fp : filter_fp) {
+ if (fp != nullptr) {
+ fclose(fp);
+ }
+ }
+ }
+ }
+ if (sample_fps_.size() != 0) {
+ for (std::vector<FILE *> &filter_fp : sample_fps_) {
+ for (FILE *&fp : filter_fp) {
+ if (fp != nullptr) {
+ fclose(fp);
+ }
+ }
+ }
+ }
if (all_done_) {
size_t node_a_index = 0;
for (const auto &filters : filters_per_node_) {
for (const auto &filter : filters) {
- std::optional<
- std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
- next = filter.filter->Consume();
+ std::optional<std::tuple<BootTimestamp, BootDuration>> next =
+ filter.filter->Consume();
if (next) {
skip_order_validation_
? LOG(WARNING)
@@ -574,6 +632,15 @@
++node_a_index;
}
}
+
+ // Make sure everything is flushed to disk.
+ if (!node_samples_.empty()) {
+ for (NodeSamples &node : node_samples_) {
+ for (SingleNodeSamples &timestamps : node.nodes) {
+ CHECK(timestamps.messages.empty());
+ }
+ }
+ }
}
void MultiNodeNoncausalOffsetEstimator::Start(
@@ -587,19 +654,6 @@
void MultiNodeNoncausalOffsetEstimator::Start(
std::vector<monotonic_clock::time_point> times) {
- for (std::pair<const std::tuple<const Node *, const Node *>,
- message_bridge::NoncausalOffsetEstimator> &filter : filters_) {
- const Node *const node_a = std::get<0>(filter.first);
- const size_t node_a_index =
- configuration::GetNodeIndex(configuration_, node_a);
- const Node *const node_b = std::get<1>(filter.first);
- const size_t node_b_index =
- configuration::GetNodeIndex(configuration_, node_b);
-
- filter.second.SetFirstFwdTime(times[node_a_index]);
- filter.second.SetFirstRevTime(times[node_b_index]);
- }
-
std::fstream s("/tmp/timestamp_noncausal_starttime.csv", s.trunc | s.out);
CHECK(s.is_open());
for (const Node *node : configuration::GetNodes(configuration())) {
@@ -629,9 +683,8 @@
if (it == filters_.end()) {
auto &x = filters_
- .insert(std::make_pair(
- tuple,
- message_bridge::NoncausalOffsetEstimator(node_a, node_b)))
+ .emplace(tuple, message_bridge::NoncausalOffsetEstimator(
+ node_a, node_b))
.first->second;
const size_t node_a_index =
@@ -644,16 +697,6 @@
node_b_index);
filters_per_node_[node_b_index].emplace_back(x.GetFilter(node_b),
node_a_index);
-
- if (FLAGS_timestamps_to_csv) {
- x.SetFwdCsvFileName(absl::StrCat("/tmp/timestamp_noncausal_",
- node_a->name()->string_view(), "_",
- node_b->name()->string_view()));
- x.SetRevCsvFileName(absl::StrCat("/tmp/timestamp_noncausal_",
- node_b->name()->string_view(), "_",
- node_a->name()->string_view()));
- }
-
return &x;
} else {
return &it->second;
@@ -695,9 +738,7 @@
<< ": Timestamps queued before we registered the timestamp hooks.";
timestamp_mapper->set_timestamp_callback(
[this, node_index](logger::TimestampedMessage *msg) {
- // TODO(austin): Funnel the boot index through the offset estimator.
- CHECK_EQ(msg->monotonic_remote_time.boot, 0u);
- if (msg->monotonic_remote_time.time != monotonic_clock::min_time) {
+ if (msg->monotonic_remote_time != BootTimestamp::min_time()) {
// Got a forwarding timestamp!
NoncausalOffsetEstimator *filter =
filters_per_channel_[node_index][msg->channel_index];
@@ -706,18 +747,41 @@
// Call the correct method depending on if we are the forward or
// reverse direction here.
- CHECK_EQ(msg->monotonic_event_time.boot, 0u);
- filter->Sample(node, msg->monotonic_event_time.time,
- msg->monotonic_remote_time.time);
+ filter->Sample(node, msg->monotonic_event_time,
+ msg->monotonic_remote_time);
- CHECK_EQ(msg->monotonic_timestamp_time.boot, 0u);
- if (msg->monotonic_timestamp_time.time !=
- monotonic_clock::min_time) {
+ if (!node_samples_.empty()) {
+ const size_t sending_node_index =
+ source_node_index_[msg->channel_index];
+ // The message went from node sending_node_index to
+ // node_index. monotonic_remote_time is the time it was sent,
+ // and monotonic_event_time was the time it was received.
+ node_samples_[node_index]
+ .nodes[sending_node_index]
+ .messages.emplace(std::make_pair(
+ msg->monotonic_event_time, msg->monotonic_remote_time));
+ }
+
+ if (msg->monotonic_timestamp_time != BootTimestamp::min_time()) {
// TODO(austin): This assumes that this timestamp is only logged
// on the node which sent the data. That is correct for now,
// but should be explicitly checked somewhere.
- filter->ReverseSample(node, msg->monotonic_event_time.time,
- msg->monotonic_timestamp_time.time);
+ filter->ReverseSample(node, msg->monotonic_event_time,
+ msg->monotonic_timestamp_time);
+
+ if (!node_samples_.empty()) {
+ const size_t sending_node_index =
+ source_node_index_[msg->channel_index];
+ // The timestamp then went back from node node_index to
+ // sending_node_index. monotonic_event_time is the time it
+ // was sent, and monotonic_timestamp_time was the time it was
+ // received.
+ node_samples_[sending_node_index]
+ .nodes[node_index]
+ .messages.emplace(
+ std::make_pair(msg->monotonic_timestamp_time,
+ msg->monotonic_event_time));
+ }
}
}
});
@@ -728,9 +792,8 @@
timestamp_mappers_ = std::move(timestamp_mappers);
}
-TimeComparison CompareTimes(
- const std::vector<monotonic_clock::time_point> &ta,
- const std::vector<monotonic_clock::time_point> &tb) {
+TimeComparison CompareTimes(const std::vector<BootTimestamp> &ta,
+ const std::vector<BootTimestamp> &tb) {
if (ta.size() != tb.size() || ta.empty()) {
return TimeComparison::kInvalid;
}
@@ -739,14 +802,17 @@
bool is_eq = true;
bool some_eq = false;
for (size_t i = 0; i < ta.size(); ++i) {
- if (tb[i] == monotonic_clock::min_time ||
- ta[i] == monotonic_clock::min_time) {
+ if (tb[i] == BootTimestamp::min_time() ||
+ ta[i] == BootTimestamp::min_time()) {
continue;
}
- if (ta[i] < tb[i]) {
+ if (ta[i].boot != tb[i].boot) {
+ continue;
+ }
+ if (ta[i].time < tb[i].time) {
is_less = true;
is_eq = false;
- } else if (ta[i] > tb[i]) {
+ } else if (ta[i].time > tb[i].time) {
is_greater = true;
is_eq = false;
} else {
@@ -776,41 +842,44 @@
}
}
-chrono::nanoseconds MaxElapsedTime(
- const std::vector<monotonic_clock::time_point> &ta,
- const std::vector<monotonic_clock::time_point> &tb) {
+chrono::nanoseconds MaxElapsedTime(const std::vector<BootTimestamp> &ta,
+ const std::vector<BootTimestamp> &tb) {
CHECK_EQ(ta.size(), tb.size());
CHECK(!ta.empty());
bool first = true;
chrono::nanoseconds dt;
for (size_t i = 0; i < ta.size(); ++i) {
// Skip any invalid timestamps.
- if (ta[i] == monotonic_clock::min_time ||
- tb[i] == monotonic_clock::min_time) {
+ if (ta[i] == BootTimestamp::min_time() ||
+ tb[i] == BootTimestamp::min_time()) {
continue;
}
- const chrono::nanoseconds dti = tb[i] - ta[i];
- if (first || dti > dt) {
- dt = dti;
+ if (ta[i].boot == tb[i].boot) {
+ const chrono::nanoseconds dti = tb[i].time - ta[i].time;
+ if (first || dti > dt) {
+ dt = dti;
+ }
+ first = false;
}
- first = false;
}
+ CHECK(!first);
return dt;
}
-chrono::nanoseconds InvalidDistance(
- const std::vector<monotonic_clock::time_point> &ta,
- const std::vector<monotonic_clock::time_point> &tb) {
+chrono::nanoseconds InvalidDistance(const std::vector<BootTimestamp> &ta,
+ const std::vector<BootTimestamp> &tb) {
// Use an int128 so we have no concern about number of times or size of the
// difference.
absl::int128 sum = 0;
for (size_t i = 0; i < ta.size(); ++i) {
- if (ta[i] == monotonic_clock::min_time ||
- tb[i] == monotonic_clock::min_time) {
+ if (ta[i] == BootTimestamp::min_time() ||
+ tb[i] == BootTimestamp::min_time()) {
continue;
}
- sum += (ta[i] - tb[i]).count();
+ if (ta[i].boot == tb[i].boot) {
+ sum += (ta[i].time - tb[i].time).count();
+ }
}
// Pick the direction and sign to return.
if (sum < 0) {
@@ -1020,17 +1089,14 @@
// invalidate the point. Do this for both nodes to pick up all the
// timestamps.
if (filter.filter->has_unobserved_line()) {
- // TODO(austin): Handle boots properly...
timestamp_mappers_[node_a_index]->QueueUntil(
- BootTimestamp{.boot = 0u,
- .time = filter.filter->unobserved_line_end() +
- time_estimation_buffer_seconds_});
+ filter.filter->unobserved_line_end() +
+ time_estimation_buffer_seconds_);
if (timestamp_mappers_[node_b_index] != nullptr) {
- timestamp_mappers_[node_b_index]->QueueUntil(BootTimestamp{
- .boot = 0u,
- .time = filter.filter->unobserved_line_remote_end() +
- time_estimation_buffer_seconds_});
+ timestamp_mappers_[node_b_index]->QueueUntil(
+ filter.filter->unobserved_line_remote_end() +
+ time_estimation_buffer_seconds_);
}
}
}
@@ -1091,52 +1157,73 @@
return problem;
}
-std::tuple<NoncausalTimestampFilter *,
- std::vector<aos::monotonic_clock::time_point>, int>
+std::tuple<NoncausalTimestampFilter *, std::vector<BootTimestamp>, int>
MultiNodeNoncausalOffsetEstimator::NextSolution(
- TimestampProblem *problem,
- const std::vector<aos::monotonic_clock::time_point> &base_times) {
+ TimestampProblem *problem, const std::vector<BootTimestamp> &base_times) {
// Ok, now solve for the minimum time on each channel.
- std::vector<aos::monotonic_clock::time_point> result_times;
+ std::vector<BootTimestamp> result_times;
NoncausalTimestampFilter *next_filter = nullptr;
size_t solution_index = 0;
{
size_t node_a_index = 0;
for (const auto &filters : filters_per_node_) {
VLOG(1) << "Investigating filter for node " << node_a_index;
- monotonic_clock::time_point next_node_time = monotonic_clock::max_time;
+ BootTimestamp next_node_time = BootTimestamp::max_time();
+ BootDuration next_node_duration;
NoncausalTimestampFilter *next_node_filter = nullptr;
// Find the oldest time for each node in each filter, and solve for that
// time. That gives us the next timestamp for this node.
+ size_t filter_index = 0;
for (const auto &filter : filters) {
- std::optional<
- std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
- candidate = filter.filter->Observe();
+ std::optional<std::tuple<BootTimestamp, BootDuration>> candidate =
+ filter.filter->Observe();
if (candidate) {
+ VLOG(1) << "Candidate for node " << node_a_index << " filter "
+ << filter_index << " is " << std::get<0>(*candidate);
if (std::get<0>(*candidate) < next_node_time) {
next_node_time = std::get<0>(*candidate);
+ next_node_duration = std::get<1>(*candidate);
next_node_filter = filter.filter;
}
}
+ ++filter_index;
}
// Found no active filters. Either this node is off, or disconnected, or
// we are before the log file starts or after the log file ends.
- if (next_node_time == monotonic_clock::max_time) {
+ if (next_node_time == BootTimestamp::max_time()) {
++node_a_index;
continue;
}
+ VLOG(1) << "Trying " << next_node_time << " " << next_node_duration
+ << " for node " << node_a_index;
- // Optimize, and save the time into times if earlier than time.
- for (size_t node_index = 0; node_index < base_times.size();
- ++node_index) {
- // Offset everything based on the elapsed time since the last solution
- // on the node we are solving for. The rate that time elapses should be
- // ~1.
- problem->set_base_clock(
- node_index, base_times[node_index] +
- (next_node_time - base_times[node_a_index]));
+ // TODO(austin): If we start supporting only having 1 direction of
+ // timestamps, we might need to change our assumptions around
+ // BootTimestamp and BootDuration.
+
+ // If we haven't rebooted, we can seed the optimization problem with a
+ // pretty good initial guess.
+ if (next_node_time.boot == base_times[node_a_index].boot) {
+ // Optimize, and save the time into times if earlier than time.
+ for (size_t node_index = 0; node_index < base_times.size();
+ ++node_index) {
+ // Offset everything based on the elapsed time since the last solution
+ // on the node we are solving for. The rate that time elapses should
+ // be ~1.
+ problem->set_base_clock(
+ node_index,
+ {base_times[node_index].boot,
+ base_times[node_index].time +
+ (next_node_time.time - base_times[node_a_index].time)});
+ }
+ } else {
+ // Otherwise just pick the base time from before to try.
+ for (size_t node_index = 0; node_index < base_times.size();
+ ++node_index) {
+ problem->set_base_clock(node_index, base_times[node_index]);
+ }
}
problem->set_solution_node(node_a_index);
@@ -1144,9 +1231,8 @@
if (VLOG_IS_ON(2)) {
problem->Debug();
}
- // TODO(austin): Can we cache? Solving is expensive.
- std::vector<monotonic_clock::time_point> solution =
- problem->SolveNewton();
+ // TODO(austin): Solve all problems at once :)
+ std::vector<BootTimestamp> solution = problem->SolveNewton();
// Bypass checking if order validation is turned off. This lets us dump a
// CSV file so we can view the problem and figure out what to do. The
@@ -1199,7 +1285,8 @@
<< "ns";
for (size_t i = 0; i < result_times.size(); ++i) {
VLOG(1) << " " << result_times[i] << " vs " << solution[i]
- << " -> " << (result_times[i] - solution[i]).count()
+ << " -> "
+ << (result_times[i].time - solution[i].time).count()
<< "ns";
}
VLOG(1) << "Ignoring because it is close enough.";
@@ -1210,11 +1297,12 @@
// solution... This is an internal failure because that means time
// goes backwards on a node.
CHECK_EQ(result_times.size(), solution.size());
- LOG(INFO) << "Times can't be compared by "
- << InvalidDistance(result_times, solution).count() << "ns";
+ LOG(INFO) << "Times can't be compared by " << invalid_distance.count()
+ << "ns";
for (size_t i = 0; i < result_times.size(); ++i) {
LOG(INFO) << " " << result_times[i] << " vs " << solution[i]
- << " -> " << (result_times[i] - solution[i]).count()
+ << " -> "
+ << (result_times[i].time - solution[i].time).count()
<< "ns";
}
@@ -1240,15 +1328,15 @@
return std::make_tuple(next_filter, std::move(result_times), solution_index);
}
-std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
+std::optional<
+ std::tuple<distributed_clock::time_point, std::vector<BootTimestamp>>>
MultiNodeNoncausalOffsetEstimator::NextTimestamp() {
// TODO(austin): Detect and handle there being fewer nodes in the log file
// than in replay, or them being in a different order.
TimestampProblem problem = MakeProblem();
// Ok, now solve for the minimum time on each channel.
- std::vector<aos::monotonic_clock::time_point> result_times;
+ std::vector<BootTimestamp> result_times;
NoncausalTimestampFilter *next_filter = nullptr;
int solution_node_index = 0;
std::tie(next_filter, result_times, solution_node_index) =
@@ -1272,9 +1360,9 @@
}
fprintf(fp_, "\n");
}
- return std::make_tuple(distributed_clock::epoch(),
- std::vector<monotonic_clock::time_point>(
- NodesCount(), monotonic_clock::epoch()));
+ return std::make_tuple(
+ distributed_clock::epoch(),
+ std::vector<BootTimestamp>(NodesCount(), BootTimestamp::epoch()));
}
if (VLOG_IS_ON(1)) {
LOG(INFO) << "Found no more timestamps.";
@@ -1295,31 +1383,21 @@
return std::nullopt;
}
+
+ std::tuple<logger::BootTimestamp, logger::BootDuration> sample;
if (first_solution_) {
- std::vector<aos::monotonic_clock::time_point> resolved_times;
- NoncausalTimestampFilter *resolved_next_filter = nullptr;
- int resolved_solution_node_index = 0;
-
- VLOG(1) << "Resolving with updated base times for accuracy.";
- std::tie(resolved_next_filter, resolved_times,
- resolved_solution_node_index) =
- NextSolution(&problem, result_times);
-
first_solution_ = false;
- next_filter = resolved_next_filter;
- solution_node_index = resolved_solution_node_index;
// Force any unknown nodes to track the distributed clock (which starts at 0
// too).
- for (monotonic_clock::time_point &time : result_times) {
- if (time == monotonic_clock::min_time) {
- time = monotonic_clock::epoch();
+ for (BootTimestamp &time : result_times) {
+ if (time == BootTimestamp::min_time()) {
+ time = BootTimestamp::epoch();
}
}
- result_times = std::move(resolved_times);
- next_filter->Consume();
+ sample = *next_filter->Consume();
} else {
- next_filter->Consume();
+ sample = *next_filter->Consume();
// We found a good sample, so consume it. If it is a duplicate, we still
// want to consume it. But, if this is the first time around, we want to
// re-solve by recursing (once) to pick up the better base.
@@ -1332,7 +1410,8 @@
problem.Debug();
for (size_t i = 0; i < result_times.size(); ++i) {
LOG(INFO) << " " << last_monotonics_[i] << " vs " << result_times[i]
- << " -> " << (last_monotonics_[i] - result_times[i]).count()
+ << " -> "
+ << (last_monotonics_[i].time - result_times[i].time).count()
<< "ns";
}
LOG(FATAL)
@@ -1353,7 +1432,8 @@
CHECK_EQ(last_monotonics_.size(), result_times.size());
for (size_t i = 0; i < result_times.size(); ++i) {
LOG(INFO) << " " << last_monotonics_[i] << " vs " << result_times[i]
- << " -> " << (last_monotonics_[i] - result_times[i]).count()
+ << " -> "
+ << (last_monotonics_[i].time - result_times[i].time).count()
<< "ns";
}
LOG(FATAL) << "Please investigate. Use --max_invalid_distance_ns="
@@ -1368,7 +1448,7 @@
const chrono::nanoseconds dt = MaxElapsedTime(last_monotonics_, result_times);
last_distributed_ += dt;
for (size_t i = 0; i < result_times.size(); ++i) {
- if (result_times[i] == monotonic_clock::min_time) {
+ if (result_times[i] == BootTimestamp::min_time()) {
// Found an unknown node. Move its time along by the amount the
// distributed clock moved.
result_times[i] = last_monotonics_[i] + dt;
@@ -1381,26 +1461,147 @@
size_t node_index = 0;
for (const auto &filters : filters_per_node_) {
for (const auto &filter : filters) {
- filter.filter->FreezeUntil(last_monotonics_[node_index]);
- filter.filter->FreezeUntilRemote(last_monotonics_[filter.b_index]);
+ filter.filter->FreezeUntil(last_monotonics_[node_index],
+ last_monotonics_[filter.b_index]);
}
++node_index;
}
}
+ if (filter_fps_.size() > 0) {
+ const int node_a_index =
+ configuration::GetNodeIndex(configuration(), next_filter->node_a());
+ const int node_b_index =
+ configuration::GetNodeIndex(configuration(), next_filter->node_b());
+
+ FILE *fp = filter_fps_[node_a_index][node_b_index];
+ if (fp == nullptr) {
+ fp = filter_fps_[node_a_index][node_b_index] = fopen(
+ absl::StrCat("/tmp/timestamp_noncausal_",
+ next_filter->node_a()->name()->string_view(), "_",
+ next_filter->node_b()->name()->string_view(), ".csv")
+ .c_str(),
+ "w");
+ fprintf(fp, "time_since_start,sample_ns,filtered_offset\n");
+ }
+
+ fprintf(fp, "%.9f, %.9f, %.9f\n",
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ last_distributed_.time_since_epoch())
+ .count(),
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ std::get<0>(sample).time.time_since_epoch())
+ .count(),
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ std::get<1>(sample).duration)
+ .count());
+ }
+
if (fp_) {
fprintf(
fp_, "%.9f",
chrono::duration<double>(last_distributed_.time_since_epoch()).count());
- for (const monotonic_clock::time_point t : last_monotonics_) {
+ for (const BootTimestamp t : last_monotonics_) {
fprintf(fp_, ", %.9f",
- chrono::duration<double>(t.time_since_epoch()).count());
+ chrono::duration<double>(t.time.time_since_epoch()).count());
}
fprintf(fp_, "\n");
}
-
+ FlushAllSamples(false);
return std::make_tuple(last_distributed_, last_monotonics_);
}
+void MultiNodeNoncausalOffsetEstimator::FlushAllSamples(bool finish) {
+ size_t node_index = 0;
+ for (NodeSamples &node_samples : node_samples_) {
+ size_t sending_node_index = 0;
+ for (SingleNodeSamples &samples : node_samples.nodes) {
+ if (samples.messages.size() == 0) {
+ ++sending_node_index;
+ continue;
+ }
+
+ FILE *samples_fp = sample_fps_[node_index][sending_node_index];
+ if (samples_fp == nullptr) {
+ samples_fp = sample_fps_[node_index][sending_node_index] =
+ fopen(absl::StrCat("/tmp/timestamp_noncausal_",
+ logged_configuration()
+ ->nodes()
+ ->Get(node_index)
+ ->name()
+ ->string_view(),
+ "_",
+ logged_configuration()
+ ->nodes()
+ ->Get(sending_node_index)
+ ->name()
+ ->string_view(),
+ "_samples.csv")
+ .c_str(),
+ "w");
+ fprintf(samples_fp,
+ "time_since_start,sample_ns,monotonic,monotonic+offset("
+ "remote)\n");
+ }
+
+ auto times_it = times_.begin();
+ while (!samples.messages.empty() && times_it != times_.end()) {
+ const std::pair<BootTimestamp, BootTimestamp> &message =
+ *samples.messages.begin();
+ auto next = times_it + 1;
+ while (next != times_.end()) {
+ if (std::get<1>(*next)[node_index] < message.first) {
+ times_it = next;
+ next = times_it + 1;
+ } else {
+ break;
+ }
+ }
+
+ distributed_clock::time_point distributed;
+ const distributed_clock::time_point d0 = std::get<0>(*times_it);
+ const BootTimestamp t0 = std::get<1>(*times_it)[node_index];
+ if (next == times_.end()) {
+ if (!finish) {
+ break;
+ }
+ CHECK_EQ(t0.boot, message.first.boot);
+ distributed = message.first.time - t0.time + d0;
+ } else {
+ const distributed_clock::time_point d1 = std::get<0>(*next);
+ const BootTimestamp t1 = std::get<1>(*next)[node_index];
+ if (t0.boot == t1.boot) {
+ distributed = ::aos::message_bridge::ToDistributedClock(
+ d0, d1, t0.time, t1.time, message.first.time);
+ } else if (t0.boot == message.first.boot) {
+ distributed = message.first.time - t0.time + d0;
+ } else if (t1.boot == message.first.boot) {
+ distributed = message.first.time - t1.time + d1;
+ } else {
+ LOG(FATAL) << "Boots don't match";
+ }
+ }
+ fprintf(samples_fp, "%.9f, %.9f, %.9f, %.9f\n",
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ distributed.time_since_epoch())
+ .count(),
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ message.second.time - message.first.time)
+ .count(),
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ message.first.time.time_since_epoch())
+ .count(),
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ message.second.time.time_since_epoch())
+ .count());
+
+ samples.messages.erase(samples.messages.begin());
+ }
+ ++sending_node_index;
+ }
+ ++node_index;
+ }
+}
+
} // namespace message_bridge
} // namespace aos
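The extracted message_bridge::ToDistributedClock() helper above implements d0 + (time - t0) * (d1 - d0) / (t1 - t0) with absl::int128 intermediates and round-to-nearest. A hedged sketch with made-up times showing the expected result; the function name InterpolationExample is illustrative:

```cpp
#include "aos/network/multinode_timestamp_filter.h"
#include "glog/logging.h"

namespace chrono = std::chrono;
using aos::distributed_clock;
using aos::monotonic_clock;

void InterpolationExample() {
  // Two solved points: monotonic t0/t1 on one node map to distributed d0/d1.
  const distributed_clock::time_point d0 = distributed_clock::epoch();
  const distributed_clock::time_point d1 =
      distributed_clock::epoch() + chrono::seconds(1);
  const monotonic_clock::time_point t0 =
      monotonic_clock::epoch() + chrono::seconds(10);
  const monotonic_clock::time_point t1 =
      monotonic_clock::epoch() + chrono::seconds(11) + chrono::milliseconds(1);

  // 500.5ms into a 1001ms monotonic span maps to exactly 500ms of distributed
  // time, with no precision loss from the 128-bit arithmetic.
  const distributed_clock::time_point result =
      aos::message_bridge::ToDistributedClock(
          d0, d1, t0, t1,
          t0 + chrono::milliseconds(500) + chrono::microseconds(500));
  CHECK_EQ(result, distributed_clock::epoch() + chrono::milliseconds(500));
}
```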
diff --git a/aos/network/multinode_timestamp_filter.h b/aos/network/multinode_timestamp_filter.h
index 96f7228..a52db7d 100644
--- a/aos/network/multinode_timestamp_filter.h
+++ b/aos/network/multinode_timestamp_filter.h
@@ -6,6 +6,7 @@
#include <string_view>
#include "Eigen/Dense"
+#include "absl/container/btree_set.h"
#include "aos/configuration.h"
#include "aos/events/logging/logfile_utils.h"
#include "aos/events/simulated_event_loop.h"
@@ -43,12 +44,8 @@
size_t solution_node() const { return solution_node_; }
// Sets and gets the base time for a node.
- void set_base_clock(size_t i, monotonic_clock::time_point t) {
- base_clock_[i] = t;
- }
- monotonic_clock::time_point base_clock(size_t i) const {
- return base_clock_[i];
- }
+ void set_base_clock(size_t i, logger::BootTimestamp t) { base_clock_[i] = t; }
+ logger::BootTimestamp base_clock(size_t i) const { return base_clock_[i]; }
// Adds a timestamp filter from a -> b.
// filter[a_index]->Offset(ta) + ta => t(b_index);
@@ -59,11 +56,11 @@
// Solves the optimization problem phrased using the symmetric Newton's method
// solver and returns the optimal time on each node.
- std::vector<monotonic_clock::time_point> SolveNewton();
+ std::vector<logger::BootTimestamp> SolveNewton();
// Validates the solution, returning true if it meets all the constraints, and
// false otherwise.
- bool ValidateSolution(std::vector<monotonic_clock::time_point> solution);
+ bool ValidateSolution(std::vector<logger::BootTimestamp> solution);
// LOGs a representation of the problem.
void Debug();
@@ -127,7 +124,7 @@
// The optimization problem is solved as base_clock + time_offsets to minimize
// numerical precision problems. This contains all the base times. The base
// time corresponding to solution_node is fixed and not solved.
- std::vector<monotonic_clock::time_point> base_clock_;
+ std::vector<logger::BootTimestamp> base_clock_;
std::vector<bool> live_;
// True if both node_mapping_ and live_nodes_ are valid.
@@ -169,6 +166,7 @@
// Converts a time to the distributed clock for scheduling and cross-node
// time measurement.
+ // TODO(austin): Need to pass in boot.
distributed_clock::time_point ToDistributedClock(
size_t node_index, monotonic_clock::time_point time) override;
@@ -180,6 +178,12 @@
// Called whenever time passes this point and we can forget about it.
void ObserveTimePassed(distributed_clock::time_point time) override;
+ // Queues 1 more timestamp in the interpolation list. This is public for
+ // timestamp_extractor so it can hammer on the log until everything is queued.
+ std::optional<const std::tuple<distributed_clock::time_point,
+ std::vector<logger::BootTimestamp>> *>
+ QueueNextTimestamp();
+
private:
// Returns the next timestamp, or nullopt if there isn't one. It is assumed
// that if there isn't one, there never will be one.
@@ -187,7 +191,7 @@
// on every monotonic clock for all the nodes in the factory that this will be
// hooked up to.
virtual std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
+ std::vector<logger::BootTimestamp>>>
NextTimestamp() = 0;
// Queues timestamps until the last time in the queue matches the provided
@@ -195,22 +199,22 @@
void QueueUntil(
std::function<
bool(const std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>> &)>
+ std::vector<logger::BootTimestamp>> &)>
not_done);
// The number of nodes to enforce.
const size_t node_count_;
+ protected:
// List of timestamps.
std::deque<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
+ std::vector<logger::BootTimestamp>>>
times_;
// If true, we have popped data from times_, so anything before the start is
// unknown.
bool have_popped_ = false;
- protected:
// The amount of time to buffer when estimating. We care so we don't throw
// data out of our queue too soon. This time is indicative of how much to
// buffer everywhere, so let's latch onto it as well until proven that there
@@ -226,21 +230,28 @@
enum class TimeComparison { kBefore, kAfter, kInvalid, kEq };
// Compares two sets of times, optionally ignoring times that are min_time
-TimeComparison CompareTimes(const std::vector<monotonic_clock::time_point> &ta,
- const std::vector<monotonic_clock::time_point> &tb);
+TimeComparison CompareTimes(const std::vector<logger::BootTimestamp> &ta,
+ const std::vector<logger::BootTimestamp> &tb);
// Returns the maximum amount of elapsed time between the two samples in time.
std::chrono::nanoseconds MaxElapsedTime(
- const std::vector<monotonic_clock::time_point> &ta,
- const std::vector<monotonic_clock::time_point> &tb);
+ const std::vector<logger::BootTimestamp> &ta,
+ const std::vector<logger::BootTimestamp> &tb);
// Returns the amount of time by which ta and tb are out of order. The primary
// direction is defined to be the direction of the average of the offsets. So,
// if the average is +, and we get a -ve outlier, the absolute value of that -ve
// outlier is the invalid distance.
std::chrono::nanoseconds InvalidDistance(
- const std::vector<monotonic_clock::time_point> &ta,
- const std::vector<monotonic_clock::time_point> &tb);
+ const std::vector<logger::BootTimestamp> &ta,
+ const std::vector<logger::BootTimestamp> &tb);
+
+// Interpolates a monotonic time to a distributed time without loss of
+// precision. Implements (d1 - d0) / (t1 - t0) * (time - t0) + d0;
+distributed_clock::time_point ToDistributedClock(
+ distributed_clock::time_point d0, distributed_clock::time_point d1,
+ monotonic_clock::time_point t0, monotonic_clock::time_point t1,
+ monotonic_clock::time_point time);
// Class to hold a NoncausalOffsetEstimator per pair of communicating nodes, and
// to estimate and set the overall time of all nodes.
@@ -283,7 +294,7 @@
std::vector<logger::TimestampMapper *> timestamp_mappers);
std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
+ std::vector<logger::BootTimestamp>>>
NextTimestamp() override;
// Checks that all the nodes in the graph are connected. Needs all filters to
@@ -315,10 +326,13 @@
private:
TimestampProblem MakeProblem();
- std::tuple<NoncausalTimestampFilter *,
- std::vector<aos::monotonic_clock::time_point>, int>
+ std::tuple<NoncausalTimestampFilter *, std::vector<logger::BootTimestamp>,
+ int>
NextSolution(TimestampProblem *problem,
- const std::vector<aos::monotonic_clock::time_point> &base_times);
+ const std::vector<logger::BootTimestamp> &base_times);
+
+ // Writes all samples to disk.
+ void FlushAllSamples(bool finish);
const Configuration *configuration_;
const Configuration *logged_configuration_;
@@ -343,7 +357,7 @@
std::vector<std::vector<FilterPair>> filters_per_node_;
distributed_clock::time_point last_distributed_ = distributed_clock::epoch();
- std::vector<aos::monotonic_clock::time_point> last_monotonics_;
+ std::vector<logger::BootTimestamp> last_monotonics_;
// A mapping from node and channel to the relevant estimator.
std::vector<std::vector<NoncausalOffsetEstimator *>> filters_per_channel_;
@@ -353,7 +367,39 @@
bool first_solution_ = true;
bool all_done_ = false;
+ // Optional file pointers to save the results of the noncausal filter in. This
+ // lives here so we can give each sample a distributed clock.
+ std::vector<std::vector<FILE *>> filter_fps_;
+ // Optional file pointers to save all the samples into.
+ std::vector<std::vector<FILE *>> sample_fps_;
+
FILE *fp_ = NULL;
+
+ struct SingleNodeSamples {
+ struct CompareTimestamps {
+ bool operator()(
+ const std::pair<logger::BootTimestamp, logger::BootTimestamp> &a,
+ const std::pair<logger::BootTimestamp, logger::BootTimestamp> &b)
+ const {
+ return a.first < b.first;
+ }
+ };
+
+ // Delivered, sent timestamps for each message.
+ absl::btree_set<std::pair<logger::BootTimestamp, logger::BootTimestamp>,
+ CompareTimestamps>
+ messages;
+ };
+
+ struct NodeSamples {
+ // List of nodes sending.
+ std::vector<SingleNodeSamples> nodes;
+ };
+
+ // List of nodes where data is delivered.
+ std::vector<NodeSamples> node_samples_;
+ // Mapping from channel to the node_index of the source node.
+ std::vector<size_t> source_node_index_;
};
} // namespace message_bridge
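The node_samples_ bookkeeping above keeps one ordered set of (delivered, sent) timestamp pairs per (receiving node, sending node) pair, which FlushAllSamples() later converts to distributed time and dumps to CSV. A standalone sketch of that shape; SingleNodeSamplesSketch, NodeSamplesSketch, and RecordSample are illustrative names mirroring the private types, not the real class members:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

#include "absl/container/btree_set.h"
#include "aos/events/logging/boot_timestamp.h"

using aos::logger::BootTimestamp;

struct SingleNodeSamplesSketch {
  struct CompareTimestamps {
    bool operator()(const std::pair<BootTimestamp, BootTimestamp> &a,
                    const std::pair<BootTimestamp, BootTimestamp> &b) const {
      return a.first < b.first;  // Ordered by delivered (event) time.
    }
  };
  // (delivered, sent) timestamp pairs for each forwarded message.
  absl::btree_set<std::pair<BootTimestamp, BootTimestamp>, CompareTimestamps>
      messages;
};

struct NodeSamplesSketch {
  std::vector<SingleNodeSamplesSketch> nodes;  // Indexed by sending node.
};

// node_samples[receiving_node].nodes[sending_node] accumulates the pairs.
void RecordSample(std::vector<NodeSamplesSketch> *node_samples,
                  size_t receiving_node, size_t sending_node,
                  BootTimestamp monotonic_event_time,
                  BootTimestamp monotonic_remote_time) {
  (*node_samples)[receiving_node].nodes[sending_node].messages.emplace(
      monotonic_event_time, monotonic_remote_time);
}
```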
diff --git a/aos/network/multinode_timestamp_filter_test.cc b/aos/network/multinode_timestamp_filter_test.cc
index bdc1992..f67d8de 100644
--- a/aos/network/multinode_timestamp_filter_test.cc
+++ b/aos/network/multinode_timestamp_filter_test.cc
@@ -15,21 +15,22 @@
namespace chrono = std::chrono;
using aos::monotonic_clock;
+using aos::logger::BootTimestamp;
// Tests solution time(s) comparison and measure of invalid / inconsistent times
TEST(TimestampProblemTest, CompareTimes) {
- const monotonic_clock::time_point e = monotonic_clock::epoch();
+ const BootTimestamp e = BootTimestamp::epoch();
// Create two sets of times, offset by 1000ns
- std::vector<monotonic_clock::time_point> time_list;
+ std::vector<BootTimestamp> time_list;
for (int i = 0; i < 10; i++) {
time_list.push_back(e + std::chrono::nanoseconds(i * 1000));
}
- std::vector<monotonic_clock::time_point> times_a = {time_list.begin(),
- time_list.end() - 1u};
- std::vector<monotonic_clock::time_point> times_b = {time_list.begin() + 1u,
- time_list.end()};
+ std::vector<BootTimestamp> times_a = {time_list.begin(),
+ time_list.end() - 1u};
+ std::vector<BootTimestamp> times_b = {time_list.begin() + 1u,
+ time_list.end()};
CHECK_EQ(static_cast<int>(CompareTimes(times_a, times_b)),
static_cast<int>(TimeComparison::kBefore));
@@ -41,8 +42,8 @@
static_cast<int>(TimeComparison::kEq));
// Now try one of the times being min_time.
- std::vector<monotonic_clock::time_point> times_b_min = times_b;
- times_b_min[5] = monotonic_clock::min_time;
+ std::vector<BootTimestamp> times_b_min = times_b;
+ times_b_min[5] = BootTimestamp::min_time();
CHECK_EQ(static_cast<int>(CompareTimes(times_a, times_b_min)),
static_cast<int>(TimeComparison::kBefore));
@@ -50,7 +51,7 @@
static_cast<int>(TimeComparison::kAfter));
// Test if one of the elements is equal
- std::vector<monotonic_clock::time_point> times_b_some_eq = times_b_min;
+ std::vector<BootTimestamp> times_b_some_eq = times_b_min;
times_b_some_eq[2] = times_a[2];
CHECK_EQ(static_cast<int>(CompareTimes(times_a, times_b_some_eq)),
@@ -59,7 +60,7 @@
static_cast<int>(TimeComparison::kInvalid));
// Test if elements are out of order
- std::vector<monotonic_clock::time_point> times_b_mixed = times_b_min;
+ std::vector<BootTimestamp> times_b_mixed = times_b_min;
times_b_mixed[3] = times_a[0];
CHECK_EQ(static_cast<int>(CompareTimes(times_a, times_b_mixed)),
@@ -86,8 +87,9 @@
TestingTimeConverter time_converter(3u);
time_converter.AddNextTimestamp(
de + chrono::seconds(0),
- {me + chrono::seconds(1), me + chrono::seconds(10),
- me + chrono::seconds(1000)});
+ {{.boot = 0, .time = me + chrono::seconds(1)},
+ {.boot = 0, .time = me + chrono::seconds(10)},
+ {.boot = 0, .time = me + chrono::seconds(1000)}});
EXPECT_EQ(time_converter.FromDistributedClock(0, de - chrono::seconds(1)),
me + chrono::seconds(0));
@@ -123,12 +125,14 @@
// Test that 2 timestamps interpolate correctly.
time_converter.AddNextTimestamp(
de + chrono::seconds(0),
- {me + chrono::seconds(1), me + chrono::seconds(10),
- me + chrono::seconds(1000)});
+ {{.boot = 0, .time = me + chrono::seconds(1)},
+ {.boot = 0, .time = me + chrono::seconds(10)},
+ {.boot = 0, .time = me + chrono::seconds(1000)}});
time_converter.AddNextTimestamp(
de + chrono::seconds(1),
- {me + chrono::seconds(2), me + chrono::seconds(11),
- me + chrono::seconds(1001)});
+ {{.boot = 0, .time = me + chrono::seconds(2)},
+ {.boot = 0, .time = me + chrono::seconds(11)},
+ {.boot = 0, .time = me + chrono::seconds(1001)}});
EXPECT_EQ(
time_converter.FromDistributedClock(0, de + chrono::milliseconds(500)),
@@ -152,15 +156,16 @@
// And that we can interpolate between points not at the start.
time_converter.AddNextTimestamp(
de + chrono::seconds(2),
- {me + chrono::seconds(3) - chrono::milliseconds(2),
- me + chrono::seconds(12) - chrono::milliseconds(2),
- me + chrono::seconds(1002)});
+ {{.boot = 0, .time = me + chrono::seconds(3) - chrono::milliseconds(2)},
+ {.boot = 0, .time = me + chrono::seconds(12) - chrono::milliseconds(2)},
+ {.boot = 0, .time = me + chrono::seconds(1002)}});
time_converter.AddNextTimestamp(
de + chrono::seconds(3),
- {me + chrono::seconds(4) - chrono::milliseconds(4),
- me + chrono::seconds(13) - chrono::milliseconds(2),
- me + chrono::seconds(1003) - chrono::milliseconds(2)});
+ {{.boot = 0, .time = me + chrono::seconds(4) - chrono::milliseconds(4)},
+ {.boot = 0, .time = me + chrono::seconds(13) - chrono::milliseconds(2)},
+ {.boot = 0,
+ .time = me + chrono::seconds(1003) - chrono::milliseconds(2)}});
EXPECT_EQ(
time_converter.FromDistributedClock(0, de + chrono::milliseconds(2500)),
@@ -243,8 +248,8 @@
const monotonic_clock::time_point me = monotonic_clock::epoch();
TestingTimeConverter time_converter(1u);
- time_converter.AddNextTimestamp(de + chrono::seconds(0),
- {me + chrono::seconds(1)});
+ time_converter.AddNextTimestamp(
+ de + chrono::seconds(0), {{.boot = 0, .time = me + chrono::seconds(1)}});
EXPECT_EQ(time_converter.FromDistributedClock(0, de), me);
EXPECT_EQ(time_converter.FromDistributedClock(0, de + chrono::seconds(100)),
@@ -265,20 +270,20 @@
JsonToFlatbuffer<Node>("{\"name\": \"test_b\"}");
const Node *const node_b = &node_b_buffer.message();
- const monotonic_clock::time_point e = monotonic_clock::epoch();
- const monotonic_clock::time_point ta = e + chrono::milliseconds(500);
+ const BootTimestamp e{0, monotonic_clock::epoch()};
+ const BootTimestamp ta = e + chrono::milliseconds(500);
// Setup a time problem with an interesting shape that isn't simple and
// parallel.
NoncausalTimestampFilter a(node_a, node_b);
- a.Sample(e, chrono::milliseconds(1002));
- a.Sample(e + chrono::milliseconds(1000), chrono::milliseconds(1001));
- a.Sample(e + chrono::milliseconds(3000), chrono::milliseconds(999));
+ a.Sample(e, {0, chrono::milliseconds(1002)});
+ a.Sample(e + chrono::milliseconds(1000), {0, chrono::milliseconds(1001)});
+ a.Sample(e + chrono::milliseconds(3000), {0, chrono::milliseconds(999)});
NoncausalTimestampFilter b(node_b, node_a);
- b.Sample(e + chrono::milliseconds(1000), -chrono::milliseconds(999));
- b.Sample(e + chrono::milliseconds(2000), -chrono::milliseconds(1000));
- b.Sample(e + chrono::milliseconds(4000), -chrono::milliseconds(1002));
+ b.Sample(e + chrono::milliseconds(1000), {0, -chrono::milliseconds(999)});
+ b.Sample(e + chrono::milliseconds(2000), {0, -chrono::milliseconds(1000)});
+ b.Sample(e + chrono::milliseconds(4000), {0, -chrono::milliseconds(1002)});
TimestampProblem problem(2);
problem.set_base_clock(0, ta);
@@ -293,11 +298,11 @@
problem.set_base_clock(1, e);
problem.set_solution_node(0);
- std::vector<monotonic_clock::time_point> result1 = problem.SolveNewton();
+ std::vector<BootTimestamp> result1 = problem.SolveNewton();
problem.set_base_clock(1, result1[1]);
problem.set_solution_node(1);
- std::vector<monotonic_clock::time_point> result2 = problem.SolveNewton();
+ std::vector<BootTimestamp> result2 = problem.SolveNewton();
EXPECT_EQ(result1[0], e + chrono::seconds(1));
EXPECT_EQ(result1[0], result2[0]);
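The FromDistributedClock checks above are consistent with treating consecutive (distributed, monotonic) mapping points as line segments, as the InterpolatedTimeConverter name suggests. A minimal sketch of that interpolation arithmetic in raw nanoseconds; the helper below is illustrative only and is not the converter's API:

#include <cassert>
#include <cstdint>

// Interpolate the monotonic time for a distributed time d that falls between
// two known mapping points (d0 -> m0) and (d1 -> m1). All values are
// nanoseconds since the respective epochs. A production version would need
// wider intermediate math to avoid overflow on large spans.
int64_t InterpolateMonotonic(int64_t d0, int64_t m0, int64_t d1, int64_t m1,
                             int64_t d) {
  return m0 + (d - d0) * (m1 - m0) / (d1 - d0);
}

int main() {
  // Mirrors the node 0 mapping used above: de+0s -> me+1s and de+1s -> me+2s.
  const int64_t kSecond = 1000000000;
  assert(InterpolateMonotonic(0, kSecond, kSecond, 2 * kSecond, kSecond / 2) ==
         kSecond + kSecond / 2);
  return 0;
}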
diff --git a/aos/network/testing_time_converter.cc b/aos/network/testing_time_converter.cc
index 0dd0cb3..9558033 100644
--- a/aos/network/testing_time_converter.cc
+++ b/aos/network/testing_time_converter.cc
@@ -16,7 +16,7 @@
TestingTimeConverter ::TestingTimeConverter(size_t node_count)
: InterpolatedTimeConverter(node_count),
- last_monotonic_(node_count, monotonic_clock::epoch()) {
+ last_monotonic_(node_count, logger::BootTimestamp::epoch()) {
CHECK_GE(node_count, 1u);
}
@@ -37,7 +37,7 @@
CHECK_EQ(times.size(), last_monotonic_.size());
for (size_t i = 0; i < times.size(); ++i) {
CHECK_GT(times[i].count(), 0);
- last_monotonic_[i] += times[i];
+ last_monotonic_[i].time += times[i];
}
chrono::nanoseconds dt(0);
if (!first_) {
@@ -51,14 +51,15 @@
}
chrono::nanoseconds TestingTimeConverter::AddMonotonic(
- std::vector<monotonic_clock::time_point> times) {
+ std::vector<logger::BootTimestamp> times) {
CHECK_EQ(times.size(), last_monotonic_.size());
chrono::nanoseconds dt(0);
if (!first_) {
- dt = times[0] - last_monotonic_[0];
+ CHECK_EQ(times[0].boot, last_monotonic_[0].boot);
+ dt = times[0].time - last_monotonic_[0].time;
for (size_t i = 0; i < times.size(); ++i) {
CHECK_GT(times[i], last_monotonic_[i]);
- dt = std::max(dt, times[i] - times[0]);
+ dt = std::max(dt, times[i].time - times[0].time);
}
last_distributed_ += dt;
last_monotonic_ = times;
@@ -72,7 +73,7 @@
void TestingTimeConverter::AddNextTimestamp(
distributed_clock::time_point time,
- std::vector<monotonic_clock::time_point> times) {
+ std::vector<logger::BootTimestamp> times) {
CHECK_EQ(times.size(), last_monotonic_.size());
if (!first_) {
CHECK_GT(time, last_distributed_);
@@ -89,7 +90,7 @@
}
std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
+ std::vector<logger::BootTimestamp>>>
TestingTimeConverter::NextTimestamp() {
CHECK(!first_) << ": Tried to pull a timestamp before one was added. This "
"is unlikely to be what you want.";
diff --git a/aos/network/testing_time_converter.h b/aos/network/testing_time_converter.h
index 6d9fd8f..5ffdc01 100644
--- a/aos/network/testing_time_converter.h
+++ b/aos/network/testing_time_converter.h
@@ -32,27 +32,27 @@
// duration that the distributed clock elapsed by. Note: time must always go
// forwards.
std::chrono::nanoseconds AddMonotonic(
- std::vector<monotonic_clock::time_point> times);
+ std::vector<logger::BootTimestamp> times);
// Adds a distributed to monotonic clock mapping to the queue.
void AddNextTimestamp(distributed_clock::time_point time,
- std::vector<monotonic_clock::time_point> times);
+ std::vector<logger::BootTimestamp> times);
std::optional<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
+ std::vector<logger::BootTimestamp>>>
NextTimestamp() override;
private:
// List of timestamps.
std::deque<std::tuple<distributed_clock::time_point,
- std::vector<monotonic_clock::time_point>>>
+ std::vector<logger::BootTimestamp>>>
ts_;
// True if there is no time queued.
bool first_ = true;
// The last times returned on all clocks.
distributed_clock::time_point last_distributed_ = distributed_clock::epoch();
- std::vector<monotonic_clock::time_point> last_monotonic_;
+ std::vector<logger::BootTimestamp> last_monotonic_;
};
} // namespace message_bridge
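TestingTimeConverter now stores each mapping as a (distributed time, vector of per-node BootTimestamp) tuple and hands them back in FIFO order. A minimal sketch of that queue shape with simplified types; the class and member names below are illustrative, not the real converter:

#include <chrono>
#include <cstddef>
#include <cstdio>
#include <deque>
#include <optional>
#include <tuple>
#include <vector>

// Simplified stand-ins for distributed_clock::time_point and
// logger::BootTimestamp.
using DistributedNs = std::chrono::nanoseconds;
struct SimpleBootTimestamp {
  size_t boot;
  std::chrono::nanoseconds time;
};

class QueuedConverter {
 public:
  void AddNextTimestamp(DistributedNs time,
                        std::vector<SimpleBootTimestamp> times) {
    ts_.emplace_back(time, std::move(times));
  }
  // Returns the oldest queued mapping, or nullopt when the queue is empty.
  std::optional<std::tuple<DistributedNs, std::vector<SimpleBootTimestamp>>>
  NextTimestamp() {
    if (ts_.empty()) return std::nullopt;
    auto result = std::move(ts_.front());
    ts_.pop_front();
    return result;
  }

 private:
  std::deque<std::tuple<DistributedNs, std::vector<SimpleBootTimestamp>>> ts_;
};

int main() {
  QueuedConverter converter;
  converter.AddNextTimestamp(
      std::chrono::seconds(0),
      {{0, std::chrono::seconds(1)}, {0, std::chrono::seconds(10)}});
  while (auto next = converter.NextTimestamp()) {
    std::printf("distributed %lld ns -> %zu node times\n",
                static_cast<long long>(std::get<0>(*next).count()),
                std::get<1>(*next).size());
  }
}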
diff --git a/aos/network/timestamp_filter.cc b/aos/network/timestamp_filter.cc
index 1041b97..59af3a0 100644
--- a/aos/network/timestamp_filter.cc
+++ b/aos/network/timestamp_filter.cc
@@ -35,15 +35,6 @@
"sample_contribution, time_contribution\n");
}
-void PrintNoncausalTimestampFilterHeader(FILE *fp) {
- fprintf(fp, "time_since_start,sample_ns,filtered_offset\n");
-}
-
-void PrintNoncausalTimestampFilterSamplesHeader(FILE *fp) {
- fprintf(fp,
- "time_since_start,sample_ns,monotonic,monotonic+offset(remote)\n");
-}
-
void NormalizeTimestamps(monotonic_clock::time_point *ta_base, double *ta) {
chrono::nanoseconds ta_digits(static_cast<int64_t>(std::floor(*ta)));
*ta_base += ta_digits;
@@ -64,6 +55,9 @@
CHECK_GE(*ta, 0.0);
CHECK_LT(*ta, 1.0);
}
+void NormalizeTimestamps(logger::BootTimestamp *ta_base, double *ta) {
+ NormalizeTimestamps(&ta_base->time, ta);
+}
} // namespace
@@ -469,20 +463,9 @@
}
}
-NoncausalTimestampFilter::~NoncausalTimestampFilter() {
- // Destroy the filter by popping until empty. This will trigger any
- // timestamps to be written to the files.
- while (timestamps_.size() != 0u) {
- PopFront();
- }
- if (fp_) {
- fclose(fp_);
- }
+NoncausalTimestampFilter::SingleFilter::~SingleFilter() {}
- if (samples_fp_) {
- fclose(samples_fp_);
- }
-}
+NoncausalTimestampFilter::~NoncausalTimestampFilter() {}
std::tuple<monotonic_clock::time_point, chrono::nanoseconds> TrimTuple(
std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds, bool>
@@ -490,30 +473,23 @@
return std::make_tuple(std::get<0>(t), std::get<1>(t));
}
-void NoncausalTimestampFilter::FlushSavedSamples() {
- for (const std::tuple<aos::monotonic_clock::time_point,
- std::chrono::nanoseconds> &sample : saved_samples_) {
- fprintf(samples_fp_, "%.9f, %.9f, %.9f, %.9f\n",
- chrono::duration_cast<chrono::duration<double>>(
- std::get<0>(sample) - first_time_)
- .count(),
- chrono::duration_cast<chrono::duration<double>>(std::get<1>(sample))
- .count(),
- chrono::duration_cast<chrono::duration<double>>(
- std::get<0>(sample).time_since_epoch())
- .count(),
- chrono::duration_cast<chrono::duration<double>>(
- (std::get<0>(sample) + std::get<1>(sample)).time_since_epoch())
- .count());
- }
- fflush(samples_fp_);
- saved_samples_.clear();
+std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
+ std::tuple<logger::BootTimestamp, logger::BootDuration>>
+NoncausalTimestampFilter::FindTimestamps(logger::BootTimestamp ta_base,
+ double ta, size_t sample_boot) const {
+ CHECK_GE(ta, 0.0);
+ CHECK_LT(ta, 1.0);
+
+ // Since ta is less than 1 ns, and timestamps should be at least 1 ns
+ // apart, we can ignore ta as long as the end of the segment is strictly
+ // greater than ta_base.
+ return FindTimestamps(ta_base, sample_boot);
}
std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>
-NoncausalTimestampFilter::FindTimestamps(monotonic_clock::time_point ta_base,
- double ta) const {
+NoncausalTimestampFilter::SingleFilter::FindTimestamps(
+ monotonic_clock::time_point ta_base, double ta) const {
CHECK_GE(ta, 0.0);
CHECK_LT(ta, 1.0);
@@ -525,7 +501,8 @@
std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>
-NoncausalTimestampFilter::FindTimestamps(monotonic_clock::time_point ta) const {
+NoncausalTimestampFilter::SingleFilter::FindTimestamps(
+ monotonic_clock::time_point ta) const {
CHECK_GT(timestamps_size(), 1u);
auto it = std::upper_bound(
timestamps_.begin() + 1, timestamps_.end() - 1, ta,
@@ -661,7 +638,7 @@
}
}
-bool NoncausalTimestampFilter::IsOutsideSamples(
+bool NoncausalTimestampFilter::SingleFilter::IsOutsideSamples(
monotonic_clock::time_point ta_base, double ta) const {
DCHECK_GE(ta, 0.0);
DCHECK_LT(ta, 1.0);
@@ -673,7 +650,7 @@
return false;
}
-bool NoncausalTimestampFilter::IsAfterSamples(
+bool NoncausalTimestampFilter::SingleFilter::IsAfterSamples(
monotonic_clock::time_point ta_base, double ta) const {
DCHECK_GE(ta, 0.0);
DCHECK_LT(ta, 1.0);
@@ -685,7 +662,7 @@
}
std::tuple<monotonic_clock::time_point, chrono::nanoseconds>
-NoncausalTimestampFilter::GetReferenceTimestamp(
+NoncausalTimestampFilter::SingleFilter::GetReferenceTimestamp(
monotonic_clock::time_point ta_base, double ta) const {
DCHECK_GE(ta, 0.0);
DCHECK_LT(ta, 1.0);
@@ -698,7 +675,7 @@
return reference_timestamp;
}
-chrono::nanoseconds NoncausalTimestampFilter::Offset(
+chrono::nanoseconds NoncausalTimestampFilter::SingleFilter::Offset(
monotonic_clock::time_point ta) const {
CHECK_GT(timestamps_size(), 0u);
if (IsOutsideSamples(ta, 0.)) {
@@ -715,7 +692,8 @@
points.second, ta);
}
-std::pair<chrono::nanoseconds, double> NoncausalTimestampFilter::Offset(
+std::pair<chrono::nanoseconds, double>
+NoncausalTimestampFilter::SingleFilter::Offset(
monotonic_clock::time_point ta_base, double ta) const {
CHECK_GT(timestamps_size(), 0u);
if (IsOutsideSamples(ta_base, ta)) {
@@ -739,7 +717,7 @@
points.first, points.second, ta_base, ta));
}
-double NoncausalTimestampFilter::OffsetError(
+double NoncausalTimestampFilter::SingleFilter::OffsetError(
aos::monotonic_clock::time_point ta_base, double ta,
aos::monotonic_clock::time_point tb_base, double tb) const {
NormalizeTimestamps(&ta_base, &ta);
@@ -754,17 +732,18 @@
}
std::string NoncausalTimestampFilter::DebugOffsetError(
- aos::monotonic_clock::time_point ta_base, double ta,
- aos::monotonic_clock::time_point tb_base, double tb, size_t node_a,
- size_t node_b) const {
+ logger::BootTimestamp ta_base, double ta, logger::BootTimestamp tb_base,
+ double tb, size_t node_a, size_t node_b) const {
NormalizeTimestamps(&ta_base, &ta);
NormalizeTimestamps(&tb_base, &tb);
- if (IsOutsideSamples(ta_base, ta)) {
- auto reference_timestamp = GetReferenceTimestamp(ta_base, ta);
+ const SingleFilter *f = filter(ta_base.boot, tb_base.boot);
+
+ if (f->IsOutsideSamples(ta_base.time, ta)) {
+ auto reference_timestamp = f->GetReferenceTimestamp(ta_base.time, ta);
double slope = kMaxVelocity();
std::string note = "_";
- if (IsAfterSamples(ta_base, ta)) {
+ if (f->IsAfterSamples(ta_base.time, ta)) {
slope = -kMaxVelocity();
note = "^";
}
@@ -778,7 +757,7 @@
std::pair<std::tuple<monotonic_clock::time_point, chrono::nanoseconds>,
std::tuple<monotonic_clock::time_point, chrono::nanoseconds>>
- points = FindTimestamps(ta_base, ta);
+ points = f->FindTimestamps(ta_base.time, ta);
// As a reminder, our cost function is essentially:
// ((tb - ta - (ma ta + ba))^2
@@ -810,12 +789,12 @@
node_b_->name()->string_view());
}
-bool NoncausalTimestampFilter::ValidateSolution(
+bool NoncausalTimestampFilter::SingleFilter::ValidateSolution(
aos::monotonic_clock::time_point ta,
aos::monotonic_clock::time_point tb) const {
CHECK_GT(timestamps_size(), 0u);
if (ta < std::get<0>(timestamp(0)) && has_popped_) {
- LOG(ERROR) << NodeNames() << " O(" << ta
+ LOG(ERROR) << node_names_ << " O(" << ta
<< ") is before the start and we have forgotten the answer.";
return false;
}
@@ -828,7 +807,7 @@
const chrono::nanoseconds offset =
NoncausalTimestampFilter::ExtrapolateOffset(reference_timestamp, ta);
if (offset + ta > tb) {
- LOG(ERROR) << NodeNames() << " " << TimeString(ta, offset)
+ LOG(ERROR) << node_names_ << " " << TimeString(ta, offset)
<< " > solution time " << tb;
return false;
}
@@ -842,7 +821,7 @@
NoncausalTimestampFilter::InterpolateOffset(points.first, points.second,
ta);
if (offset + ta > tb) {
- LOG(ERROR) << NodeNames() << " " << TimeString(ta, offset)
+ LOG(ERROR) << node_names_ << " " << TimeString(ta, offset)
<< " > solution time " << tb;
LOG(ERROR) << "Bracketing times are " << TimeString(points.first) << " and "
<< TimeString(points.second);
@@ -851,23 +830,21 @@
return true;
}
-void NoncausalTimestampFilter::Sample(
- aos::monotonic_clock::time_point monotonic_now,
- chrono::nanoseconds sample_ns) {
- if (samples_fp_) {
- saved_samples_.emplace_back(std::make_pair(monotonic_now, sample_ns));
- if (first_time_ != aos::monotonic_clock::min_time) {
- FlushSavedSamples();
- }
- }
+void NoncausalTimestampFilter::Sample(logger::BootTimestamp monotonic_now_all,
+ logger::BootDuration sample_ns) {
+ filter(monotonic_now_all.boot, sample_ns.boot)
+ ->Sample(monotonic_now_all.time, sample_ns.duration);
+}
+void NoncausalTimestampFilter::SingleFilter::Sample(
+ monotonic_clock::time_point monotonic_now, chrono::nanoseconds sample_ns) {
// The first sample is easy. Just do it!
if (timestamps_.size() == 0) {
- VLOG(1) << NodeNames() << " Initial sample of "
+ VLOG(1) << node_names_ << " Initial sample of "
<< TimeString(monotonic_now, sample_ns);
timestamps_.emplace_back(std::make_tuple(monotonic_now, sample_ns));
CHECK(!fully_frozen_)
- << ": " << NodeNames()
+ << ": " << node_names_
<< " Returned a horizontal line previously and then "
"got a new sample at "
<< monotonic_now << ", "
@@ -880,7 +857,7 @@
return;
}
CHECK_GT(monotonic_now, frozen_time_)
- << ": " << NodeNames() << " Tried to insert " << monotonic_now
+ << ": " << node_names_ << " Tried to insert " << monotonic_now
<< " before the frozen time of " << frozen_time_
<< ". Increase "
"--time_estimation_buffer_seconds to greater than "
@@ -895,7 +872,7 @@
aos::monotonic_clock::duration doffset = sample_ns - std::get<1>(back);
if (dt == chrono::nanoseconds(0) && doffset == chrono::nanoseconds(0)) {
- VLOG(1) << NodeNames() << " Duplicate sample of O(" << monotonic_now
+ VLOG(1) << node_names_ << " Duplicate sample of O(" << monotonic_now
<< ") = " << sample_ns.count() << ", remote time "
<< monotonic_now + sample_ns;
@@ -909,7 +886,7 @@
// negative slope, the point violates our constraint and will never be worth
// considering. Ignore it.
if (doffset < -dt * kMaxVelocity()) {
- VLOG(1) << std::setprecision(1) << std::fixed << NodeNames()
+ VLOG(1) << std::setprecision(1) << std::fixed << node_names_
<< " Rejected sample of " << TimeString(monotonic_now, sample_ns)
<< " because " << doffset.count() << " < "
<< (-dt * kMaxVelocity()).count() << " len "
@@ -920,7 +897,7 @@
// Be overly conservative here. It either won't make a difference, or
// will give us an error with an actual useful time difference.
CHECK(!fully_frozen_)
- << ": " << NodeNames()
+ << ": " << node_names_
<< " Returned a horizontal line previously and then got a new "
"sample at "
<< monotonic_now << ", "
@@ -944,7 +921,7 @@
// remove it. This is the non-causal part of the filter.
while (dt * kMaxVelocity() < doffset && timestamps_.size() > 1u) {
CHECK(!frozen(std::get<0>(back)))
- << ": " << NodeNames() << " Can't pop an already frozen sample "
+ << ": " << node_names_ << " Can't pop an already frozen sample "
<< TimeString(back) << " while inserting "
<< TimeString(monotonic_now, sample_ns) << ", "
<< chrono::duration<double>(monotonic_now - std::get<0>(back)).count()
@@ -952,7 +929,7 @@
"to greater than "
<< chrono::duration<double>(monotonic_now - std::get<0>(back))
.count();
- VLOG(1) << NodeNames()
+ VLOG(1) << node_names_
<< " Removing now invalid sample during back propegation of "
<< TimeString(back);
timestamps_.pop_back();
@@ -962,7 +939,7 @@
doffset = sample_ns - std::get<1>(back);
}
- VLOG(1) << NodeNames() << " Added sample of "
+ VLOG(1) << node_names_ << " Added sample of "
<< TimeString(monotonic_now, sample_ns);
timestamps_.emplace_back(std::make_tuple(monotonic_now, sample_ns));
return;
@@ -991,7 +968,7 @@
const chrono::nanoseconds doffset = original_offset - sample_ns;
if (dt == chrono::nanoseconds(0) && doffset >= chrono::nanoseconds(0)) {
- VLOG(1) << NodeNames() << " Redundant timestamp "
+ VLOG(1) << node_names_ << " Redundant timestamp "
<< TimeString(monotonic_now, sample_ns) << " because "
<< TimeString(timestamps_.front())
<< " is at the same time and a better solution.";
@@ -999,7 +976,7 @@
}
}
- VLOG(1) << NodeNames() << " Added sample at beginning "
+ VLOG(1) << node_names_ << " Added sample at beginning "
<< TimeString(monotonic_now, sample_ns);
timestamps_.insert(it, std::make_tuple(monotonic_now, sample_ns));
@@ -1012,7 +989,7 @@
const chrono::nanoseconds doffset = std::get<1>(*second) - sample_ns;
if (doffset < -dt * kMaxVelocity()) {
- VLOG(1) << NodeNames() << " Removing redundant sample of "
+ VLOG(1) << node_names_ << " Removing redundant sample of "
<< TimeString(*second) << " because "
<< TimeString(timestamps_.front())
<< " would make the slope too negative.";
@@ -1039,7 +1016,7 @@
std::get<1>(*third) - std::get<1>(*second);
if (doffset > dt * kMaxVelocity()) {
- VLOG(1) << NodeNames() << " Removing invalid sample of "
+ VLOG(1) << node_names_ << " Removing invalid sample of "
<< TimeString(*second) << " because " << TimeString(*third)
<< " would make the slope too positive.";
timestamps_.erase(second);
@@ -1052,7 +1029,7 @@
}
return;
} else {
- VLOG(1) << NodeNames() << " Found the next time " << std::get<0>(*(it - 1))
+ VLOG(1) << node_names_ << " Found the next time " << std::get<0>(*(it - 1))
<< " < " << monotonic_now << " < " << std::get<0>(*it);
{
@@ -1063,14 +1040,14 @@
// If we are worse than either the previous or next point, discard.
if (prior_doffset < -prior_dt * kMaxVelocity()) {
- VLOG(1) << NodeNames() << " Ignoring timestamp "
+ VLOG(1) << node_names_ << " Ignoring timestamp "
<< TimeString(monotonic_now, sample_ns) << " because "
<< TimeString(*(it - 1))
<< " is before and the slope would be too negative.";
return;
}
if (next_doffset > next_dt * kMaxVelocity()) {
- VLOG(1) << NodeNames() << " Ignoring timestamp "
+ VLOG(1) << node_names_ << " Ignoring timestamp "
<< TimeString(monotonic_now, sample_ns) << " because "
<< TimeString(*it)
<< " is following and the slope would be too positive.";
@@ -1083,7 +1060,7 @@
// new.
auto middle_it =
timestamps_.insert(it, std::make_tuple(monotonic_now, sample_ns));
- VLOG(1) << NodeNames() << " Inserted " << TimeString(*middle_it);
+ VLOG(1) << node_names_ << " Inserted " << TimeString(*middle_it);
while (middle_it != timestamps_.end() && middle_it != timestamps_.begin()) {
auto next_it =
@@ -1099,7 +1076,7 @@
std::get<1>(*next_it) - std::get<1>(*middle_it);
if (next_doffset < -next_dt * kMaxVelocity()) {
- VLOG(1) << NodeNames()
+ VLOG(1) << node_names_
<< " Next slope is too negative, removing next point "
<< TimeString(*next_it);
next_it = timestamps_.erase(next_it);
@@ -1119,7 +1096,7 @@
if (prior_doffset > prior_dt * kMaxVelocity()) {
CHECK(!frozen(std::get<0>(*prior_it)))
- << ": " << NodeNames()
+ << ": " << node_names_
<< " Can't pop an already frozen sample. Increase "
"--time_estimation_buffer_seconds to greater than "
<< chrono::duration<double>(prior_dt).count();
@@ -1137,23 +1114,27 @@
}
}
-bool NoncausalTimestampFilter::Pop(aos::monotonic_clock::time_point time) {
+bool NoncausalTimestampFilter::Pop(logger::BootTimestamp time) {
+ // TODO(austin): Auto compute the second boot.
+ CHECK_LE(filters_.size(), 1u);
+ SingleFilter *f = filter(time.boot, 0);
VLOG(1) << NodeNames() << " Pop(" << time << ")";
bool removed = false;
// When the timestamp which is the end of the line is popped, we want to
// drop it off the list. Hence the >=
- while (timestamps_.size() >= 2 && time >= std::get<0>(timestamps_[1])) {
- PopFront();
+ while (f->timestamps_size() >= 2 &&
+ time.time >= std::get<0>(f->timestamp(1))) {
+ f->PopFront();
removed = true;
}
return removed;
}
-void NoncausalTimestampFilter::Debug() {
+void NoncausalTimestampFilter::SingleFilter::Debug() const {
size_t count = 0;
for (std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>
timestamp : timestamps_) {
- LOG(INFO) << NodeNames() << " "
+ LOG(INFO) << node_names_ << " "
<< TimeString(std::get<0>(timestamp), std::get<1>(timestamp))
<< " frozen? " << frozen(std::get<0>(timestamp)) << " consumed? "
<< (count < next_to_consume_);
@@ -1161,8 +1142,8 @@
}
}
-monotonic_clock::time_point NoncausalTimestampFilter::unobserved_line_end()
- const {
+monotonic_clock::time_point
+NoncausalTimestampFilter::SingleFilter::unobserved_line_end() const {
if (has_unobserved_line()) {
return std::get<0>(timestamp(next_to_consume_ + 1));
}
@@ -1170,7 +1151,7 @@
}
monotonic_clock::time_point
-NoncausalTimestampFilter::unobserved_line_remote_end() const {
+NoncausalTimestampFilter::SingleFilter::unobserved_line_remote_end() const {
if (has_unobserved_line()) {
const std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> t =
timestamp(next_to_consume_ + 1);
@@ -1179,33 +1160,33 @@
return monotonic_clock::min_time;
}
-bool NoncausalTimestampFilter::has_unobserved_line() const {
+bool NoncausalTimestampFilter::SingleFilter::has_unobserved_line() const {
return next_to_consume_ + 1 < timestamps_.size();
}
std::optional<std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
-NoncausalTimestampFilter::Observe() const {
+NoncausalTimestampFilter::SingleFilter::Observe() const {
if (timestamps_.empty() || next_to_consume_ >= timestamps_.size()) {
return std::nullopt;
}
- VLOG(1) << NodeNames() << " Observed sample of "
+ VLOG(1) << node_names_ << " Observed sample of "
<< TimeString(timestamp(next_to_consume_));
return timestamp(next_to_consume_);
}
std::optional<std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
-NoncausalTimestampFilter::Consume() {
+NoncausalTimestampFilter::SingleFilter::Consume() {
if (timestamps_.empty() || next_to_consume_ >= timestamps_.size()) {
return std::nullopt;
}
auto result = timestamp(next_to_consume_);
- VLOG(1) << NodeNames() << " Consumed sample of " << TimeString(result);
+ VLOG(1) << node_names_ << " Consumed sample of " << TimeString(result);
++next_to_consume_;
return result;
}
-void NoncausalTimestampFilter::FreezeUntil(
+void NoncausalTimestampFilter::SingleFilter::FreezeUntil(
aos::monotonic_clock::time_point node_monotonic_now) {
if (node_monotonic_now < frozen_time_) {
return;
@@ -1219,19 +1200,19 @@
}
if (timestamps_.empty()) {
- VLOG(1) << NodeNames() << " fully_frozen_, no timestamps.";
+ VLOG(1) << node_names_ << " fully_frozen_, no timestamps.";
fully_frozen_ = true;
} else if (node_monotonic_now > std::get<0>(timestamps_.back())) {
// We've been asked to freeze past the last point. It isn't safe to add any
// more points or we will change this region.
- VLOG(1) << NodeNames() << " fully_frozen_, after the end.";
+ VLOG(1) << node_names_ << " fully_frozen_, after the end.";
fully_frozen_ = true;
} else {
LOG(FATAL) << "How did we get here?";
}
}
-void NoncausalTimestampFilter::FreezeUntilRemote(
+void NoncausalTimestampFilter::SingleFilter::FreezeUntilRemote(
aos::monotonic_clock::time_point remote_monotonic_now) {
for (size_t i = 0; i < timestamps_.size(); ++i) {
// Freeze 1 point past the match.
@@ -1243,44 +1224,20 @@
}
if (timestamps_.empty()) {
- VLOG(1) << NodeNames() << " fully_frozen_, no timestamps.";
+ VLOG(1) << node_names_ << " fully_frozen_, no timestamps.";
fully_frozen_ = true;
} else if (remote_monotonic_now > std::get<0>(timestamps_.back()) +
std::get<1>(timestamps_.back())) {
// We've been asked to freeze past the last point. It isn't safe to add any
// more points or we will change this region.
- VLOG(1) << NodeNames() << " fully_frozen_, after the end.";
+ VLOG(1) << node_names_ << " fully_frozen_, after the end.";
fully_frozen_ = true;
} else {
LOG(FATAL) << "How did we get here?";
}
}
-void NoncausalTimestampFilter::SetFirstTime(
- aos::monotonic_clock::time_point time) {
- first_time_ = time;
- if (fp_) {
- fp_ = freopen(NULL, "wb", fp_);
- PrintNoncausalTimestampFilterHeader(fp_);
- }
- if (samples_fp_) {
- samples_fp_ = freopen(NULL, "wb", samples_fp_);
- PrintNoncausalTimestampFilterSamplesHeader(samples_fp_);
- FlushSavedSamples();
- }
-}
-
-void NoncausalTimestampFilter::SetCsvFileName(std::string_view name) {
- fp_ = fopen(absl::StrCat(name, ".csv").c_str(), "w");
- samples_fp_ = fopen(absl::StrCat(name, "_samples.csv").c_str(), "w");
- PrintNoncausalTimestampFilterHeader(fp_);
- PrintNoncausalTimestampFilterSamplesHeader(samples_fp_);
-}
-
-void NoncausalTimestampFilter::PopFront() {
- VLOG(1) << NodeNames() << " Popped sample of " << TimeString(timestamp(0));
- MaybeWriteTimestamp(timestamp(0));
-
+void NoncausalTimestampFilter::SingleFilter::PopFront() {
// If we drop data, we shouldn't add anything before that point.
frozen_time_ = std::max(frozen_time_, std::get<0>(timestamp(0)));
timestamps_.pop_front();
@@ -1290,60 +1247,48 @@
}
}
-void NoncausalTimestampFilter::MaybeWriteTimestamp(
- std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>
- timestamp) {
- if (fp_ && first_time_ != aos::monotonic_clock::min_time) {
- fprintf(fp_, "%.9f, %.9f, %.9f\n",
- std::chrono::duration_cast<std::chrono::duration<double>>(
- std::get<0>(timestamp) - first_time_)
- .count(),
- std::chrono::duration_cast<std::chrono::duration<double>>(
- std::get<0>(timestamp).time_since_epoch())
- .count(),
- std::chrono::duration_cast<std::chrono::duration<double>>(
- std::get<1>(timestamp))
- .count());
- fflush(fp_);
- }
-}
-
void NoncausalOffsetEstimator::Sample(
- const Node *node, aos::monotonic_clock::time_point node_delivered_time,
- aos::monotonic_clock::time_point other_node_sent_time) {
+ const Node *node, logger::BootTimestamp node_delivered_time,
+ logger::BootTimestamp other_node_sent_time) {
VLOG(1) << "Sample delivered " << node_delivered_time << " sent "
<< other_node_sent_time << " " << node->name()->string_view()
<< " -> "
<< ((node == node_a_) ? node_b_ : node_a_)->name()->string_view();
if (node == node_a_) {
- a_.Sample(node_delivered_time, other_node_sent_time - node_delivered_time);
+ a_.Sample(node_delivered_time,
+ {other_node_sent_time.boot,
+ other_node_sent_time.time - node_delivered_time.time});
} else if (node == node_b_) {
- b_.Sample(node_delivered_time, other_node_sent_time - node_delivered_time);
+ b_.Sample(node_delivered_time,
+ {other_node_sent_time.boot,
+ other_node_sent_time.time - node_delivered_time.time});
} else {
LOG(FATAL) << "Unknown node " << node->name()->string_view();
}
}
void NoncausalOffsetEstimator::ReverseSample(
- const Node *node, aos::monotonic_clock::time_point node_sent_time,
- aos::monotonic_clock::time_point other_node_delivered_time) {
+ const Node *node, logger::BootTimestamp node_sent_time,
+ logger::BootTimestamp other_node_delivered_time) {
VLOG(1) << "Reverse sample delivered " << other_node_delivered_time
<< " sent " << node_sent_time << " "
<< ((node == node_a_) ? node_b_ : node_a_)->name()->string_view()
<< " -> " << node->name()->string_view();
if (node == node_a_) {
b_.Sample(other_node_delivered_time,
- node_sent_time - other_node_delivered_time);
+ {node_sent_time.boot,
+ node_sent_time.time - other_node_delivered_time.time});
} else if (node == node_b_) {
a_.Sample(other_node_delivered_time,
- node_sent_time - other_node_delivered_time);
+ {node_sent_time.boot,
+ node_sent_time.time - other_node_delivered_time.time});
} else {
LOG(FATAL) << "Unknown node " << node->name()->string_view();
}
}
-bool NoncausalOffsetEstimator::Pop(
- const Node *node, aos::monotonic_clock::time_point node_monotonic_now) {
+bool NoncausalOffsetEstimator::Pop(const Node *node,
+ logger::BootTimestamp node_monotonic_now) {
if (node == node_a_) {
if (a_.Pop(node_monotonic_now)) {
VLOG(1) << "Popping forward sample to " << node_a_->name()->string_view()
diff --git a/aos/network/timestamp_filter.h b/aos/network/timestamp_filter.h
index 7ef32f4..166e811 100644
--- a/aos/network/timestamp_filter.h
+++ b/aos/network/timestamp_filter.h
@@ -8,6 +8,7 @@
#include <deque>
#include "aos/configuration.h"
+#include "aos/events/logging/boot_timestamp.h"
#include "aos/time/time.h"
#include "glog/logging.h"
@@ -240,139 +241,197 @@
public:
NoncausalTimestampFilter(const Node *node_a, const Node *node_b)
: node_a_(node_a), node_b_(node_b) {}
+
+ NoncausalTimestampFilter(NoncausalTimestampFilter &&) noexcept = default;
+ NoncausalTimestampFilter &operator=(
+ NoncausalTimestampFilter &&other) noexcept {
+ // Sigh, std::vector really prefers to copy rather than move. We don't want
+ // to copy this class or we will end up with double-counted samples or put
+ // something in the file twice. The only way it will move instead of copy
+ // is if we implement a noexcept move assignment operator.
+ node_a_ = other.node_a_;
+ other.node_a_ = nullptr;
+ node_b_ = other.node_b_;
+ other.node_b_ = nullptr;
+
+ filters_ = std::move(other.filters_);
+ current_filter_ = other.current_filter_;
+ return *this;
+ }
+ NoncausalTimestampFilter(const NoncausalTimestampFilter &) = delete;
+ NoncausalTimestampFilter &operator=(const NoncausalTimestampFilter &) =
+ delete;
~NoncausalTimestampFilter();
- // Check whether the given timestamp falls within our current samples
- bool IsOutsideSamples(monotonic_clock::time_point ta_base, double ta) const;
-
- // Check whether the given timestamp lies after our current samples
- bool IsAfterSamples(monotonic_clock::time_point ta_base, double ta) const;
-
- std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>
- GetReferenceTimestamp(monotonic_clock::time_point ta_base, double ta) const;
-
- // Returns the offset for the point in time, using the timestamps in the deque
- // to form a polyline used to interpolate.
- std::chrono::nanoseconds Offset(monotonic_clock::time_point ta) const;
- std::pair<std::chrono::nanoseconds, double> Offset(
- monotonic_clock::time_point ta_base, double ta) const;
-
// Returns the error between the offset in the provided timestamps, and the
// offset at ta.
- double OffsetError(aos::monotonic_clock::time_point ta_base, double ta,
- aos::monotonic_clock::time_point tb_base, double tb) const;
+ double OffsetError(logger::BootTimestamp ta_base, double ta,
+ logger::BootTimestamp tb_base, double tb) const {
+ return filter(ta_base.boot, tb_base.boot)
+ ->OffsetError(ta_base.time, ta, tb_base.time, tb);
+ }
// Returns the string representation of 2 * OffsetError(ta, tb)
- std::string DebugOffsetError(aos::monotonic_clock::time_point ta_base,
- double ta,
- aos::monotonic_clock::time_point tb_base,
- double tb, size_t node_a, size_t node_b) const;
+ std::string DebugOffsetError(logger::BootTimestamp ta_base, double ta,
+ logger::BootTimestamp tb_base, double tb,
+ size_t node_a, size_t node_b) const;
// Confirms that the solution meets the constraints. Returns true on success.
- bool ValidateSolution(aos::monotonic_clock::time_point ta,
- aos::monotonic_clock::time_point tb) const;
-
- double Convert(double ta) const {
- return ta +
- static_cast<double>(
- Offset(monotonic_clock::epoch(), ta).first.count()) +
- Offset(monotonic_clock::epoch(), ta).second;
+ bool ValidateSolution(logger::BootTimestamp ta,
+ logger::BootTimestamp tb) const {
+ return filter(ta.boot, tb.boot)->ValidateSolution(ta.time, tb.time);
}
// Adds a new sample to our filtered timestamp list.
- void Sample(aos::monotonic_clock::time_point monotonic_now,
- std::chrono::nanoseconds sample_ns);
+ void Sample(logger::BootTimestamp monotonic_now,
+ logger::BootDuration sample_ns);
// Removes any old timestamps from our timestamps list.
// Returns true if any points were popped.
- bool Pop(aos::monotonic_clock::time_point time);
+ bool Pop(logger::BootTimestamp time);
- std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>
- timestamp(size_t i) const {
- if (i == 0u && timestamps_.size() >= 2u && !has_popped_) {
- std::chrono::nanoseconds dt =
- std::get<0>(timestamps_[1]) - std::get<0>(timestamps_[0]);
- std::chrono::nanoseconds doffset =
- std::get<1>(timestamps_[1]) - std::get<1>(timestamps_[0]);
-
- // If we are early in the log file, the filter hasn't had time to get
- // started. We might only have 2 samples, and the first sample was
- // incredibly delayed, violating our velocity constraint. In that case,
- // modify the first sample (rather than remove it) to retain the knowledge
- // of the velocity, but adhere to the constraints.
- //
- // We are doing this here so as points get added in any order, we don't
- // confuse ourselves about what really happened.
- if (doffset > dt * kMaxVelocity()) {
- const aos::monotonic_clock::duration adjusted_initial_time =
- std::get<1>(timestamps_[1]) -
- aos::monotonic_clock::duration(
- static_cast<aos::monotonic_clock::duration::rep>(
- dt.count() * kMaxVelocity()));
-
- return std::make_tuple(std::get<0>(timestamps_[0]),
- adjusted_initial_time);
- }
+ size_t timestamps_size() const {
+ size_t result = 0u;
+ for (const BootFilter &filter : filters_) {
+ result += filter.filter.timestamps_size();
}
- return std::make_tuple(std::get<0>(timestamps_[i]),
- std::get<1>(timestamps_[i]));
+ return result;
}
- // Returns if the timestamp is frozen or not.
- bool frozen(size_t index) const {
- return fully_frozen_ || std::get<0>(timestamps_[index]) <= frozen_time_;
+ // For testing only:
+ void Debug() const {
+ for (const BootFilter &filter : filters_) {
+ LOG(INFO) << NodeNames() << " boota: " << filter.boot.first << ", "
+ << filter.boot.second;
+ filter.filter.Debug();
+ }
}
- bool frozen(aos::monotonic_clock::time_point t) const {
- return t <= frozen_time_;
- }
-
- size_t timestamps_size() const { return timestamps_.size(); }
-
- // Returns a debug string with the nodes this filter represents.
- std::string NodeNames() const;
-
- void Debug();
-
- // Sets the starting point and filename to log samples to. These functions
- // are only used when doing CSV file logging to debug the filter.
- void SetFirstTime(aos::monotonic_clock::time_point time);
- void SetCsvFileName(std::string_view name);
-
// Marks all line segments up until the provided time on the provided node as
// used.
- void FreezeUntil(aos::monotonic_clock::time_point node_monotonic_now);
- void FreezeUntilRemote(aos::monotonic_clock::time_point remote_monotonic_now);
+ void FreezeUntil(logger::BootTimestamp node_monotonic_now,
+ logger::BootTimestamp remote_monotonic_now) {
+ // TODO(austin): CHECK that all older boots are fully frozen.
+ filter(node_monotonic_now.boot, remote_monotonic_now.boot)
+ ->FreezeUntil(node_monotonic_now.time);
+ filter(node_monotonic_now.boot, remote_monotonic_now.boot)
+ ->FreezeUntilRemote(remote_monotonic_now.time);
+ }
// Returns true if there is a full line which hasn't been observed.
- bool has_unobserved_line() const;
+ bool has_unobserved_line() const {
+ return filters_.back().filter.has_unobserved_line();
+ }
// Returns the time of the second point in the unobserved line, or min_time if
// there is no line.
- monotonic_clock::time_point unobserved_line_end() const;
+ logger::BootTimestamp unobserved_line_end() const {
+ auto &f = filters_.back();
+ return {static_cast<size_t>(f.boot.first), f.filter.unobserved_line_end()};
+ }
// Returns the time of the second point in the unobserved line on the remote
// node, or min_time if there is no line.
- monotonic_clock::time_point unobserved_line_remote_end() const;
+ logger::BootTimestamp unobserved_line_remote_end() const {
+ auto &f = filters_.back();
+ return {static_cast<size_t>(f.boot.second),
+ f.filter.unobserved_line_remote_end()};
+ }
// Returns the next timestamp in the queue if available without incrementing
// the pointer. This, Consume, and FreezeUntil work together to allow
// tracking and freezing timestamps which have been combined externally.
- std::optional<
- std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
- Observe() const;
+ std::optional<std::tuple<logger::BootTimestamp, logger::BootDuration>>
+ Observe() const {
+ if (filters_.size() == 0u) {
+ return std::nullopt;
+ }
+
+ size_t current_filter = current_filter_;
+ while (true) {
+ const BootFilter &filter = filters_[current_filter];
+ std::optional<
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+ result = filter.filter.Observe();
+ if (!result) {
+ if (current_filter + 1 == filters_.size()) {
+ return std::nullopt;
+ } else {
+ ++current_filter;
+ continue;
+ }
+ }
+ return std::make_tuple(
+ logger::BootTimestamp{static_cast<size_t>(filter.boot.first),
+ std::get<0>(*result)},
+ logger::BootDuration{static_cast<size_t>(filter.boot.second),
+ std::get<1>(*result)});
+ }
+ }
// Returns the next timestamp in the queue if available, incrementing the
// pointer.
- std::optional<
- std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
- Consume();
+ std::optional<std::tuple<logger::BootTimestamp, logger::BootDuration>>
+ Consume() {
+ if (filters_.size() == 0u) {
+ return std::nullopt;
+ }
+ DCHECK_LT(current_filter_, filters_.size());
+
+ while (true) {
+ BootFilter &filter = filters_[current_filter_];
+ std::optional<
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+ result = filter.filter.Consume();
+ if (!result) {
+ if (current_filter_ + 1 == filters_.size()) {
+ return std::nullopt;
+ } else {
+ ++current_filter_;
+ continue;
+ }
+ }
+ return std::make_tuple(
+ logger::BootTimestamp{static_cast<size_t>(filter.boot.first),
+ std::get<0>(*result)},
+ logger::BootDuration{static_cast<size_t>(filter.boot.second),
+ std::get<1>(*result)});
+ }
+ }
// Public for testing.
+ // Returns the offset for the point in time, using the timestamps in the deque
+ // to form a polyline used to interpolate.
+ logger::BootDuration Offset(logger::BootTimestamp ta,
+ size_t sample_boot) const {
+ return {sample_boot, filter(ta.boot, sample_boot)->Offset(ta.time)};
+ }
+
+ std::pair<logger::BootDuration, double> Offset(logger::BootTimestamp ta_base,
+ double ta,
+ size_t sample_boot) const {
+ std::pair<std::chrono::nanoseconds, double> result =
+ filter(ta_base.boot, sample_boot)->Offset(ta_base.time, ta);
+ return std::make_pair(logger::BootDuration{sample_boot, result.first},
+ result.second);
+ }
+
// Assuming that there are at least 2 points in timestamps_, finds the 2
// matching points.
- std::pair<std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
- std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
- FindTimestamps(monotonic_clock::time_point ta) const;
- std::pair<std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
- std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
- FindTimestamps(monotonic_clock::time_point ta_base, double ta) const;
+ std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
+ std::tuple<logger::BootTimestamp, logger::BootDuration>>
+ FindTimestamps(logger::BootTimestamp ta, size_t sample_boot) const {
+ std::pair<std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+ result = filter(ta.boot, sample_boot)->FindTimestamps(ta.time);
+ return std::make_pair(
+ std::make_tuple(
+ logger::BootTimestamp{ta.boot, std::get<0>(result.first)},
+ logger::BootDuration{sample_boot, std::get<1>(result.first)}),
+ std::make_tuple(
+ logger::BootTimestamp{ta.boot, std::get<0>(result.second)},
+ logger::BootDuration{sample_boot, std::get<1>(result.second)}));
+ }
+ std::pair<std::tuple<logger::BootTimestamp, logger::BootDuration>,
+ std::tuple<logger::BootTimestamp, logger::BootDuration>>
+ FindTimestamps(logger::BootTimestamp ta_base, double ta,
+ size_t sample_boot) const;
static std::chrono::nanoseconds InterpolateOffset(
std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds> p0,
@@ -405,47 +464,215 @@
const Node *node_b() const { return node_b_; }
private:
+ // This class holds all the state for the filter for a single pair of boots.
+ class SingleFilter {
+ public:
+ SingleFilter(std::string node_names) : node_names_(std::move(node_names)) {}
+ SingleFilter(SingleFilter &&other) noexcept
+ : node_names_(std::move(other.node_names_)),
+ timestamps_(std::move(other.timestamps_)),
+ frozen_time_(other.frozen_time_),
+ next_to_consume_(other.next_to_consume_),
+ fully_frozen_(other.fully_frozen_),
+ has_popped_(other.has_popped_) {}
+
+ SingleFilter &operator=(SingleFilter &&other) noexcept = default;
+ SingleFilter(const SingleFilter &) = delete;
+ SingleFilter operator=(const SingleFilter &) = delete;
+ ~SingleFilter();
+
+ std::pair<std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+ FindTimestamps(monotonic_clock::time_point ta) const;
+ std::pair<std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>,
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+ FindTimestamps(monotonic_clock::time_point ta_base, double ta) const;
+
+ // Check whether the given timestamp falls within our current samples
+ bool IsOutsideSamples(monotonic_clock::time_point ta_base, double ta) const;
+ // Check whether the given timestamp lies after our current samples
+ bool IsAfterSamples(monotonic_clock::time_point ta_base, double ta) const;
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>
+ GetReferenceTimestamp(monotonic_clock::time_point ta_base, double ta) const;
+
+ std::chrono::nanoseconds Offset(monotonic_clock::time_point ta) const;
+ std::pair<std::chrono::nanoseconds, double> Offset(
+ monotonic_clock::time_point ta_base, double ta) const;
+ double OffsetError(aos::monotonic_clock::time_point ta_base, double ta,
+ aos::monotonic_clock::time_point tb_base,
+ double tb) const;
+ bool has_unobserved_line() const;
+ monotonic_clock::time_point unobserved_line_end() const;
+ monotonic_clock::time_point unobserved_line_remote_end() const;
+ std::optional<
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+ Observe() const;
+ std::optional<
+ std::tuple<monotonic_clock::time_point, std::chrono::nanoseconds>>
+ Consume();
+ void FreezeUntil(aos::monotonic_clock::time_point node_monotonic_now);
+ void FreezeUntilRemote(
+ aos::monotonic_clock::time_point remote_monotonic_now);
+ void PopFront();
+ void Debug() const;
+
+ // Returns if the timestamp is frozen or not.
+ bool frozen(size_t index) const {
+ return fully_frozen_ || std::get<0>(timestamps_[index]) <= frozen_time_;
+ }
+
+ bool frozen(aos::monotonic_clock::time_point t) const {
+ return t <= frozen_time_;
+ }
+
+ size_t timestamps_size() const { return timestamps_.size(); }
+
+ std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>
+ timestamp(size_t i) const {
+ if (i == 0u && timestamps_.size() >= 2u && !has_popped_) {
+ std::chrono::nanoseconds dt =
+ std::get<0>(timestamps_[1]) - std::get<0>(timestamps_[0]);
+ std::chrono::nanoseconds doffset =
+ std::get<1>(timestamps_[1]) - std::get<1>(timestamps_[0]);
+
+ // If we are early in the log file, the filter hasn't had time to get
+ // started. We might only have 2 samples, and the first sample was
+ // incredibly delayed, violating our velocity constraint. In that case,
+ // modify the first sample (rather than remove it) to retain the
+ // knowledge of the velocity, but adhere to the constraints.
+ //
+ // We are doing this here so that, as points get added in any order, we
+ // don't confuse ourselves about what really happened.
+ if (doffset > dt * kMaxVelocity()) {
+ const aos::monotonic_clock::duration adjusted_initial_time =
+ std::get<1>(timestamps_[1]) -
+ aos::monotonic_clock::duration(
+ static_cast<aos::monotonic_clock::duration::rep>(
+ dt.count() * kMaxVelocity()));
+
+ return std::make_tuple(std::get<0>(timestamps_[0]),
+ adjusted_initial_time);
+ }
+ }
+ return std::make_tuple(std::get<0>(timestamps_[i]),
+ std::get<1>(timestamps_[i]));
+ }
+ // Confirms that the solution meets the constraints. Returns true on
+ // success.
+ bool ValidateSolution(aos::monotonic_clock::time_point ta,
+ aos::monotonic_clock::time_point tb) const;
+
+ void Sample(monotonic_clock::time_point monotonic_now,
+ std::chrono::nanoseconds sample_ns);
+
+ private:
+ std::string node_names_;
+
+ // Timestamp and offset for each sample. Whether a sample is frozen (and
+ // can't be modified) is tracked separately by frozen_time_ and
+ // fully_frozen_ below.
+ std::deque<
+ std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>>
+ timestamps_;
+
+ aos::monotonic_clock::time_point frozen_time_ =
+ aos::monotonic_clock::min_time;
+
+ // The index of the next element in timestamps to consume. 0 means none
+ // have been consumed, and size() means all have been consumed.
+ size_t next_to_consume_ = 0;
+
+ bool fully_frozen_ = false;
+
+ bool has_popped_ = false;
+ };
+
// Removes the oldest timestamp.
void PopFront();
- // Writes a timestamp to the file if it is reasonable.
- void MaybeWriteTimestamp(
- std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>
- timestamp);
-
// Writes any saved timestamps to file.
void FlushSavedSamples();
- const Node *const node_a_;
- const Node *const node_b_;
+ const Node *node_a_;
+ const Node *node_b_;
- // Timestamp, offest, and then a boolean representing if this sample is frozen
- // and can't be modified or not.
- std::deque<
- std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>>
- timestamps_;
+ // Returns a debug string with the nodes this filter represents.
+ std::string NodeNames() const;
- aos::monotonic_clock::time_point frozen_time_ =
- aos::monotonic_clock::min_time;
+ struct BootFilter {
+ BootFilter(std::pair<int, int> new_boot, std::string node_names)
+ : boot(new_boot), filter(std::move(node_names)) {}
- // The index of the next element in timestamps to consume. 0 means none have
- // been consumed, and size() means all have been consumed.
- size_t next_to_consume_ = 0;
+ BootFilter(BootFilter &&other) noexcept = default;
+ BootFilter &operator=(BootFilter &&other) noexcept = default;
+ BootFilter(const BootFilter &) = delete;
+ void operator=(const BootFilter &) = delete;
+ std::pair<int, int> boot;
+ SingleFilter filter;
+ };
- // Holds any timestamps from before the start of the log to be flushed when we
- // know when the log starts.
- std::vector<
- std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>>
- saved_samples_;
+ static bool FilterLessThanUpper(const std::pair<int, int> &l,
+ const BootFilter &r) {
+ return l < r.boot;
+ }
+ static bool FilterLessThanLower(const BootFilter &l,
+ const std::pair<int, int> &r) {
+ return l.boot < r;
+ }
- FILE *fp_ = nullptr;
- FILE *samples_fp_ = nullptr;
+ protected:
+ SingleFilter *filter(int boota, int bootb) {
+ auto it =
+ std::lower_bound(filters_.begin(), filters_.end(),
+ std::make_pair(boota, bootb), FilterLessThanLower);
+ if (it != filters_.end() && it->boot == std::make_pair(boota, bootb)) {
+ return &it->filter;
+ }
- bool fully_frozen_ = false;
+ if (!filters_.empty()) {
+ CHECK_LT(current_filter_, filters_.size());
+ CHECK_GE(boota, filters_[current_filter_].boot.first);
+ CHECK_GE(bootb, filters_[current_filter_].boot.second);
+ }
+ SingleFilter *result =
+ &filters_
+ .emplace(std::upper_bound(filters_.begin(), filters_.end(),
+ std::make_pair(boota, bootb),
+ FilterLessThanUpper),
+ std::make_pair(boota, bootb), NodeNames())
+ ->filter;
- bool has_popped_ = false;
+ {
+ // Confirm that boots don't go backwards.
+ // A sequence like (0, 0), (0, 1), (1, 0), (1, 1) is impossible: it would
+ // mean both boots on both devices talked to both boots of the other
+ // device.
+ int last_boota = -1;
+ int last_bootb = -1;
+ for (const BootFilter &filter : filters_) {
+ CHECK(filter.boot.first != last_boota ||
+ filter.boot.second != last_bootb)
+ << ": Boots didn't increase.";
+ CHECK_GE(filter.boot.first, last_boota);
+ CHECK_GE(filter.boot.second, last_bootb);
+ last_boota = filter.boot.first;
+ last_bootb = filter.boot.second;
+ }
+ }
+ return result;
+ }
- aos::monotonic_clock::time_point first_time_ = aos::monotonic_clock::min_time;
+ const SingleFilter *filter(int boota, int bootb) const {
+ auto it =
+ std::lower_bound(filters_.begin(), filters_.end(),
+ std::make_pair(boota, bootb), FilterLessThanLower);
+ CHECK(it != filters_.end());
+ CHECK(it->boot == std::make_pair(boota, bootb));
+ return &it->filter;
+ }
+
+ private:
+ std::vector<BootFilter> filters_;
+
+ size_t current_filter_ = 0;
};
// This class holds 2 NoncausalTimestampFilter's and handles averaging the
@@ -457,6 +684,12 @@
b_(node_b, node_a),
node_a_(node_a),
node_b_(node_b) {}
+ NoncausalOffsetEstimator(NoncausalOffsetEstimator &&) noexcept = default;
+ NoncausalOffsetEstimator &operator=(
+ NoncausalOffsetEstimator &&other) noexcept = default;
+ NoncausalOffsetEstimator(const NoncausalOffsetEstimator &) = delete;
+ NoncausalOffsetEstimator &operator=(const NoncausalOffsetEstimator &) =
+ delete;
NoncausalTimestampFilter *GetFilter(const Node *n) {
if (n == node_a_) {
@@ -472,39 +705,23 @@
// Updates the filter for the provided node based on a sample from the
// provided node to the other node.
- void Sample(const Node *node,
- aos::monotonic_clock::time_point node_delivered_time,
- aos::monotonic_clock::time_point other_node_sent_time);
+ void Sample(const Node *node, logger::BootTimestamp node_delivered_time,
+ logger::BootTimestamp other_node_sent_time);
// Updates the filter for the provided node based on a sample going to the
// provided node from the other node.
- void ReverseSample(
- const Node *node, aos::monotonic_clock::time_point node_sent_time,
- aos::monotonic_clock::time_point other_node_delivered_time);
+ void ReverseSample(const Node *node, logger::BootTimestamp node_sent_time,
+ logger::BootTimestamp other_node_delivered_time);
// Removes old data points from a node before the provided time.
// Returns true if any points were popped.
- bool Pop(const Node *node,
- aos::monotonic_clock::time_point node_monotonic_now);
-
- // Returns the data points from each filter.
- size_t a_timestamps_size() const { return a_.timestamps_size(); }
- size_t b_timestamps_size() const { return b_.timestamps_size(); }
-
- void SetFirstFwdTime(monotonic_clock::time_point time) {
- a_.SetFirstTime(time);
- }
- void SetFwdCsvFileName(std::string_view name) { a_.SetCsvFileName(name); }
- void SetFirstRevTime(monotonic_clock::time_point time) {
- b_.SetFirstTime(time);
- }
- void SetRevCsvFileName(std::string_view name) { b_.SetCsvFileName(name); }
+ bool Pop(const Node *node, logger::BootTimestamp node_monotonic_now);
private:
NoncausalTimestampFilter a_;
NoncausalTimestampFilter b_;
- const Node *const node_a_;
- const Node *const node_b_;
+ const Node *node_a_;
+ const Node *node_b_;
};
} // namespace message_bridge
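The filter(boota, bootb) helpers above keep the per-boot-pair state in a vector of BootFilter entries sorted by the (local boot, remote boot) pair and locate an entry with std::lower_bound plus an asymmetric comparator. A self-contained sketch of that lookup pattern; Payload and the function names here are illustrative:

#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct Payload {
  std::string label;
};

// Mirrors BootFilter: a (local boot, remote boot) key plus the per-pair state.
struct BootEntry {
  std::pair<int, int> boot;
  Payload payload;
};

// Asymmetric comparator for lower_bound, matching FilterLessThanLower above.
bool EntryLessThanKey(const BootEntry &l, const std::pair<int, int> &r) {
  return l.boot < r;
}

const Payload *Find(const std::vector<BootEntry> &entries,
                    std::pair<int, int> key) {
  auto it =
      std::lower_bound(entries.begin(), entries.end(), key, EntryLessThanKey);
  if (it != entries.end() && it->boot == key) {
    return &it->payload;
  }
  return nullptr;  // The real filter() inserts a new entry here instead.
}

int main() {
  // Kept sorted by boot pair, as the CHECKs in filter() enforce.
  const std::vector<BootEntry> entries = {
      {{0, 0}, {"boot 0 <-> boot 0"}},
      {{1, 0}, {"boot 1 <-> boot 0"}},
      {{1, 1}, {"boot 1 <-> boot 1"}},
  };
  if (const Payload *p = Find(entries, {1, 0})) {
    std::printf("found: %s\n", p->label.c_str());
  }
}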
diff --git a/aos/network/timestamp_filter_test.cc b/aos/network/timestamp_filter_test.cc
index 95efe08..5544711 100644
--- a/aos/network/timestamp_filter_test.cc
+++ b/aos/network/timestamp_filter_test.cc
@@ -14,6 +14,26 @@
namespace chrono = std::chrono;
using aos::monotonic_clock;
+using logger::BootDuration;
+using logger::BootTimestamp;
+
+class TestingNoncausalTimestampFilter : public NoncausalTimestampFilter {
+ public:
+ TestingNoncausalTimestampFilter(const Node *node_a, const Node *node_b)
+ : NoncausalTimestampFilter(node_a, node_b) {}
+
+ bool frozen(size_t index) const { return filter(0, 0)->frozen(index); }
+ bool frozen(logger::BootTimestamp t) const {
+ return filter(t.boot, 0)->frozen(t.time);
+ }
+
+ std::tuple<BootTimestamp, BootDuration> timestamp(size_t i) const {
+ std::tuple<aos::monotonic_clock::time_point, std::chrono::nanoseconds>
+ result = filter(0, 0)->timestamp(i);
+ return std::make_tuple(BootTimestamp{0, std::get<0>(result)},
+ BootDuration{0, std::get<1>(result)});
+ }
+};
// Tests that adding samples tracks more negative offsets down quickly, and
// slowly comes back up.
@@ -93,16 +113,19 @@
// Tests that 2 samples results in the correct line between them, and the
// correct intermediate as it is being built.
TEST_F(NoncausalTimestampFilterTest, PeekPop) {
- const monotonic_clock::time_point ta(chrono::nanoseconds(100000));
- const chrono::nanoseconds oa(chrono::nanoseconds(1000));
- const monotonic_clock::time_point tb(chrono::nanoseconds(200000));
- const chrono::nanoseconds ob(chrono::nanoseconds(1100));
- const monotonic_clock::time_point tc(chrono::nanoseconds(300000));
- const chrono::nanoseconds oc(chrono::nanoseconds(1010));
+ const BootTimestamp ta{
+ 0, monotonic_clock::time_point(chrono::nanoseconds(100000))};
+ const BootDuration oa{0, chrono::nanoseconds(1000)};
+ const BootTimestamp tb{
+ 0, monotonic_clock::time_point(chrono::nanoseconds(200000))};
+ const BootDuration ob{0, chrono::nanoseconds(1100)};
+ const BootTimestamp tc{
+ 0, monotonic_clock::time_point(chrono::nanoseconds(300000))};
+ const BootDuration oc{0, chrono::nanoseconds(1010)};
// Simple case, everything is done in order, nothing is dropped.
{
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
@@ -119,7 +142,7 @@
// Now try again while dropping ta after popping it.
{
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
@@ -138,7 +161,7 @@
// Now try again while dropping ta before popping it.
{
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
@@ -155,103 +178,114 @@
// Tests that invalid samples get clipped as expected.
TEST_F(NoncausalTimestampFilterTest, ClippedSample) {
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const monotonic_clock::time_point tc(chrono::milliseconds(2));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootTimestamp tc{0,
+ monotonic_clock::time_point(chrono::milliseconds(2))};
{
// A positive slope of 1 ms/second is properly applied.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
- filter.Sample(ta, chrono::microseconds(1));
+ filter.Sample(ta, {0, chrono::microseconds(1)});
filter.Debug();
- filter.Sample(tb, chrono::microseconds(2));
+ filter.Sample(tb, {0, chrono::microseconds(2)});
filter.Debug();
ASSERT_EQ(filter.timestamps_size(), 2u);
EXPECT_EQ(filter.timestamp(0),
- std::make_tuple(ta, chrono::microseconds(1)));
+ std::make_tuple(ta, BootDuration{0, chrono::microseconds(1)}));
EXPECT_EQ(filter.timestamp(1),
- std::make_tuple(tb, chrono::microseconds(2)));
+ std::make_tuple(tb, BootDuration{0, chrono::microseconds(2)}));
}
{
// A negative slope of 1 ms/second is properly applied.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
- filter.Sample(ta, chrono::microseconds(1));
+ filter.Sample(ta, {0, chrono::microseconds(1)});
filter.Debug();
- filter.Sample(tb, chrono::microseconds(0));
+ filter.Sample(tb, {0, chrono::microseconds(0)});
filter.Debug();
ASSERT_EQ(filter.timestamps_size(), 2u);
EXPECT_EQ(filter.timestamp(0),
- std::make_tuple(ta, chrono::microseconds(1)));
+ std::make_tuple(ta, BootDuration{0, chrono::microseconds(1)}));
EXPECT_EQ(filter.timestamp(1),
- std::make_tuple(tb, chrono::microseconds(0)));
+ std::make_tuple(tb, BootDuration{0, chrono::microseconds(0)}));
}
{
// Too much negative is ignored.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
- filter.Sample(ta, chrono::microseconds(1));
+ filter.Sample(ta, {0, chrono::microseconds(1)});
filter.Debug();
- filter.Sample(tb, -chrono::microseconds(1));
+ filter.Sample(tb, {0, -chrono::microseconds(1)});
filter.Debug();
ASSERT_EQ(filter.timestamps_size(), 1u);
}
{
// Too much positive pulls up the first point.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
- filter.Sample(ta, chrono::microseconds(1));
+ filter.Sample(ta, {0, chrono::microseconds(1)});
filter.Debug();
- filter.Sample(tb, chrono::microseconds(3));
+ filter.Sample(tb, {0, chrono::microseconds(3)});
filter.Debug();
ASSERT_EQ(filter.timestamps_size(), 2u);
- EXPECT_EQ(std::get<1>(filter.timestamp(0)), chrono::microseconds(2));
- EXPECT_EQ(std::get<1>(filter.timestamp(1)), chrono::microseconds(3));
+ EXPECT_EQ(std::get<1>(filter.timestamp(0)),
+ (BootDuration{0, chrono::microseconds(2)}));
+ EXPECT_EQ(std::get<1>(filter.timestamp(1)),
+ (BootDuration{0, chrono::microseconds(3)}));
}
{
// Too much positive slope removes points.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
- filter.Sample(ta, chrono::microseconds(1));
+ filter.Sample(ta, {0, chrono::microseconds(1)});
filter.Debug();
- filter.Sample(tb, chrono::microseconds(1));
+ filter.Sample(tb, {0, chrono::microseconds(1)});
filter.Debug();
ASSERT_EQ(filter.timestamps_size(), 2u);
// Now add a sample with a slope of 0.002. This should back propagate and
// remove the middle point since it violates our constraints.
- filter.Sample(tc, chrono::microseconds(3));
+ filter.Sample(tc, {0, chrono::microseconds(3)});
filter.Debug();
ASSERT_EQ(filter.timestamps_size(), 2u);
- EXPECT_EQ(std::get<1>(filter.timestamp(0)), chrono::microseconds(1));
- EXPECT_EQ(std::get<1>(filter.timestamp(1)), chrono::microseconds(3));
+ EXPECT_EQ(std::get<1>(filter.timestamp(0)),
+ (BootDuration{0, chrono::microseconds(1)}));
+ EXPECT_EQ(std::get<1>(filter.timestamp(1)),
+ (BootDuration{0, chrono::microseconds(3)}));
}
}
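
Why the rewritten expectations above gained parentheses: a braced initializer contains a comma, and the preprocessor splits macro arguments on any comma not enclosed in parentheses, so the extra pair keeps the BootDuration a single argument to EXPECT_EQ.

  // EXPECT_EQ(x, BootDuration{0, chrono::microseconds(2)});  // preprocessor sees 3 arguments
  EXPECT_EQ(std::get<1>(filter.timestamp(0)),
            (BootDuration{0, chrono::microseconds(2)}));      // parentheses keep it as one
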
// Tests that removing points from the filter works as expected.
TEST_F(NoncausalTimestampFilterTest, PointRemoval) {
- const monotonic_clock::time_point t_before(-chrono::milliseconds(1));
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const monotonic_clock::time_point tc(chrono::milliseconds(2));
+ const logger::BootTimestamp t_before{
+ 0, monotonic_clock::time_point(-chrono::milliseconds(1))};
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootTimestamp tc{0,
+ monotonic_clock::time_point(chrono::milliseconds(2))};
// A positive slope of 1 ms/second is properly applied.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
- filter.Sample(ta, chrono::microseconds(1));
+ filter.Sample(ta, {0, chrono::microseconds(1)});
filter.Debug();
- filter.Sample(tb, chrono::microseconds(2));
+ filter.Sample(tb, {0, chrono::microseconds(2)});
filter.Debug();
- filter.Sample(tc, chrono::microseconds(1));
+ filter.Sample(tc, {0, chrono::microseconds(1)});
filter.Debug();
ASSERT_EQ(filter.timestamps_size(), 3u);
@@ -275,12 +309,14 @@
// Tests that inserting duplicate points causes the duplicates to get ignored.
TEST_F(NoncausalTimestampFilterTest, DuplicatePoints) {
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(1));
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const chrono::nanoseconds ob(chrono::microseconds(2));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(1)};
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootDuration ob{0, chrono::microseconds(2)};
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
@@ -294,15 +330,18 @@
// simple case.
TEST_F(NoncausalTimestampFilterTest, BackwardsInTimeSimple) {
// Start with the simple case. A valid point in the middle.
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(1));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(1)};
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const chrono::nanoseconds ob(chrono::microseconds(0));
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootDuration ob{0, chrono::microseconds(0)};
- const monotonic_clock::time_point tc(chrono::milliseconds(2));
- const chrono::nanoseconds oc(chrono::microseconds(1));
- NoncausalTimestampFilter filter(node_a, node_b);
+ const BootTimestamp tc{0,
+ monotonic_clock::time_point(chrono::milliseconds(2))};
+ const BootDuration oc{0, chrono::microseconds(1)};
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tc, oc);
@@ -318,59 +357,67 @@
// Tests that inserting a duplicate point at the beginning gets ignored if it is
// more negative than the original beginning point.
TEST_F(NoncausalTimestampFilterTest, BackwardsInTimeDuplicateNegative) {
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(1));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(1)};
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const chrono::nanoseconds ob(chrono::microseconds(1));
- NoncausalTimestampFilter filter(node_a, node_b);
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootDuration ob{0, chrono::microseconds(1)};
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
- filter.Sample(ta, chrono::microseconds(0));
+ filter.Sample(ta, {0, chrono::microseconds(0)});
filter.Debug();
EXPECT_EQ(filter.timestamps_size(), 2u);
- EXPECT_EQ(filter.timestamp(0), std::make_tuple(ta, chrono::microseconds(1)));
+ EXPECT_EQ(filter.timestamp(0), std::make_tuple(ta, oa));
EXPECT_EQ(filter.timestamp(1), std::make_tuple(tb, ob));
}
// Tests that inserting a better duplicate point at the beginning gets taken if
// it is more positive than the original beginning point.
TEST_F(NoncausalTimestampFilterTest, BackwardsInTimeDuplicatePositive) {
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(1));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(1)};
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const chrono::nanoseconds ob(chrono::microseconds(1));
- NoncausalTimestampFilter filter(node_a, node_b);
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootDuration ob{0, chrono::microseconds(1)};
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
- filter.Sample(ta, chrono::microseconds(2));
+ filter.Sample(ta, {0, chrono::microseconds(2)});
filter.Debug();
EXPECT_EQ(filter.timestamps_size(), 2u);
- EXPECT_EQ(filter.timestamp(0), std::make_tuple(ta, chrono::microseconds(2)));
+ EXPECT_EQ(filter.timestamp(0),
+ std::make_tuple(ta, BootDuration{0, chrono::microseconds(2)}));
EXPECT_EQ(filter.timestamp(1), std::make_tuple(tb, ob));
}
// Tests that inserting a negative duplicate point in the middle is dropped.
TEST_F(NoncausalTimestampFilterTest, BackwardsInTimeMiddleDuplicateNegative) {
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(1));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(1)};
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const chrono::nanoseconds ob(chrono::microseconds(2));
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootDuration ob{0, chrono::microseconds(2)};
- const monotonic_clock::time_point tc(chrono::milliseconds(2));
- const chrono::nanoseconds oc(chrono::microseconds(1));
- NoncausalTimestampFilter filter(node_a, node_b);
+ const BootTimestamp tc{0,
+ monotonic_clock::time_point(chrono::milliseconds(2))};
+ const BootDuration oc{0, chrono::microseconds(1)};
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
filter.Sample(tc, oc);
- filter.Sample(tb, chrono::microseconds(0));
+ filter.Sample(tb, {0, chrono::microseconds(0)});
filter.Debug();
EXPECT_EQ(filter.timestamps_size(), 3u);
@@ -381,25 +428,29 @@
// Tests that inserting a positive duplicate point in the middle is taken.
TEST_F(NoncausalTimestampFilterTest, BackwardsInTimeMiddleDuplicatePositive) {
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(1));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(1)};
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const chrono::nanoseconds ob(chrono::microseconds(0));
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootDuration ob{0, chrono::microseconds(0)};
- const monotonic_clock::time_point tc(chrono::milliseconds(2));
- const chrono::nanoseconds oc(chrono::microseconds(1));
- NoncausalTimestampFilter filter(node_a, node_b);
+ const BootTimestamp tc{0,
+ monotonic_clock::time_point(chrono::milliseconds(2))};
+ const BootDuration oc{0, chrono::microseconds(1)};
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
filter.Sample(tc, oc);
- filter.Sample(tb, chrono::microseconds(2));
+ filter.Sample(tb, {0, chrono::microseconds(2)});
filter.Debug();
EXPECT_EQ(filter.timestamps_size(), 3u);
EXPECT_EQ(filter.timestamp(0), std::make_tuple(ta, oa));
- EXPECT_EQ(filter.timestamp(1), std::make_tuple(tb, chrono::microseconds(2)));
+ EXPECT_EQ(filter.timestamp(1),
+ std::make_tuple(tb, BootDuration{0, chrono::microseconds(2)}));
EXPECT_EQ(filter.timestamp(2), std::make_tuple(tc, oc));
}
@@ -424,22 +475,22 @@
std::array<chrono::nanoseconds, 4> o(
{chrono::microseconds(i), chrono::microseconds(j),
chrono::microseconds(k), chrono::microseconds(l)});
- NoncausalTimestampFilter forward(node_a, node_b);
+ TestingNoncausalTimestampFilter forward(node_a, node_b);
VLOG(1) << "Sorting in order";
- forward.Sample(t[0], o[0]);
- forward.Sample(t[1], o[1]);
- forward.Sample(t[2], o[2]);
- forward.Sample(t[3], o[3]);
+ forward.Sample({0, t[0]}, {0, o[0]});
+ forward.Sample({0, t[1]}, {0, o[1]});
+ forward.Sample({0, t[2]}, {0, o[2]});
+ forward.Sample({0, t[3]}, {0, o[3]});
// Confirm everything is within the velocity bounds.
for (size_t i = 1; i < forward.timestamps_size(); ++i) {
const chrono::nanoseconds dt =
- std::get<0>(forward.timestamp(i)) -
- std::get<0>(forward.timestamp(i - 1));
+ std::get<0>(forward.timestamp(i)).time -
+ std::get<0>(forward.timestamp(i - 1)).time;
const chrono::nanoseconds doffset =
- std::get<1>(forward.timestamp(i)) -
- std::get<1>(forward.timestamp(i - 1));
+ std::get<1>(forward.timestamp(i)).duration -
+ std::get<1>(forward.timestamp(i - 1)).duration;
EXPECT_GE(doffset, -dt * kMaxVelocity());
EXPECT_LE(doffset, dt * kMaxVelocity());
}
@@ -457,20 +508,20 @@
std::make_pair(t[indices[3]], o[indices[3]])});
VLOG(1) << "Sorting randomized";
- NoncausalTimestampFilter random(node_a, node_b);
- random.Sample(pairs[0].first, pairs[0].second);
+ TestingNoncausalTimestampFilter random(node_a, node_b);
+ random.Sample({0, pairs[0].first}, {0, pairs[0].second});
if (VLOG_IS_ON(1)) {
random.Debug();
}
- random.Sample(pairs[1].first, pairs[1].second);
+ random.Sample({0, pairs[1].first}, {0, pairs[1].second});
if (VLOG_IS_ON(1)) {
random.Debug();
}
- random.Sample(pairs[2].first, pairs[2].second);
+ random.Sample({0, pairs[2].first}, {0, pairs[2].second});
if (VLOG_IS_ON(1)) {
random.Debug();
}
- random.Sample(pairs[3].first, pairs[3].second);
+ random.Sample({0, pairs[3].first}, {0, pairs[3].second});
if (VLOG_IS_ON(1)) {
random.Debug();
}
@@ -483,7 +534,7 @@
forward.Debug();
LOG(INFO) << "Random";
for (int i = 0; i < 4; ++i) {
- LOG(INFO) << "Sample(" << pairs[i].first << ", "
+ LOG(INFO) << "Sample({0, " << pairs[i].first << "}, "
<< pairs[i].second.count() << ")";
}
random.Debug();
@@ -503,38 +554,43 @@
// Tests that the right points get frozen when we ask for them to be.
TEST_F(NoncausalTimestampFilterTest, FrozenTimestamps) {
// Start with the simple case. A valid point in the middle.
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(1));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(1)};
- const monotonic_clock::time_point tb(chrono::milliseconds(1));
- const chrono::nanoseconds ob(chrono::microseconds(0));
+ const BootTimestamp tb{0,
+ monotonic_clock::time_point(chrono::milliseconds(1))};
+ const BootDuration ob{0, chrono::microseconds(0)};
- const monotonic_clock::time_point tc(chrono::milliseconds(2));
- const chrono::nanoseconds oc(chrono::microseconds(1));
+ const BootTimestamp tc{0,
+ monotonic_clock::time_point(chrono::milliseconds(2))};
+ const BootDuration oc{0, chrono::microseconds(1)};
// Test for our node.
{
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tc, oc);
filter.Sample(tb, ob);
ASSERT_EQ(filter.timestamps_size(), 3u);
- filter.FreezeUntil(ta - chrono::microseconds(1));
+ filter.FreezeUntil(ta - chrono::microseconds(1),
+ {0, monotonic_clock::min_time});
EXPECT_TRUE(filter.frozen(0));
EXPECT_FALSE(filter.frozen(1));
- filter.FreezeUntil(ta);
+ filter.FreezeUntil(ta, {0, monotonic_clock::min_time});
EXPECT_TRUE(filter.frozen(0));
EXPECT_FALSE(filter.frozen(1));
- filter.FreezeUntil(ta + chrono::microseconds(1));
+ filter.FreezeUntil(ta + chrono::microseconds(1),
+ {0, monotonic_clock::min_time});
EXPECT_TRUE(filter.frozen(0));
EXPECT_TRUE(filter.frozen(1));
EXPECT_FALSE(filter.frozen(2));
- filter.FreezeUntil(tc);
+ filter.FreezeUntil(tc, {0, monotonic_clock::min_time});
EXPECT_TRUE(filter.frozen(0));
EXPECT_TRUE(filter.frozen(1));
EXPECT_TRUE(filter.frozen(2));
@@ -543,44 +599,49 @@
// Test that fully frozen doesn't apply when there is 1 time and we are before
// the start.
{
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
- filter.FreezeUntil(ta - chrono::microseconds(1));
+ filter.FreezeUntil(ta - chrono::microseconds(1),
+ {0, monotonic_clock::min_time});
EXPECT_TRUE(filter.frozen(0));
// New samples aren't frozen until they are explicitly frozen.
filter.Sample(tb, ob);
EXPECT_FALSE(filter.frozen(1));
- filter.FreezeUntil(ta + chrono::microseconds(1));
+ filter.FreezeUntil(ta + chrono::microseconds(1),
+ {0, monotonic_clock::min_time});
EXPECT_TRUE(filter.frozen(1));
}
// Test the remote node
{
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tc, oc);
filter.Sample(tb, ob);
ASSERT_EQ(filter.timestamps_size(), 3u);
- filter.FreezeUntilRemote(ta + oa - chrono::microseconds(1));
+    // Freeze based on the remote time (replaces the old FreezeUntilRemote).

+ filter.FreezeUntil({0, monotonic_clock::min_time},
+ ta + oa.duration - chrono::microseconds(1));
EXPECT_TRUE(filter.frozen(0));
EXPECT_FALSE(filter.frozen(1));
- filter.FreezeUntilRemote(ta + oa);
+ filter.FreezeUntil({0, monotonic_clock::min_time}, ta + oa);
EXPECT_TRUE(filter.frozen(0));
EXPECT_FALSE(filter.frozen(1));
- filter.FreezeUntilRemote(ta + oa + chrono::microseconds(1));
+ filter.FreezeUntil({0, monotonic_clock::min_time},
+ ta + oa.duration + chrono::microseconds(1));
EXPECT_TRUE(filter.frozen(0));
EXPECT_TRUE(filter.frozen(1));
EXPECT_FALSE(filter.frozen(2));
- filter.FreezeUntilRemote(tc + oc);
+ filter.FreezeUntil({0, monotonic_clock::min_time}, tc + oc);
EXPECT_TRUE(filter.frozen(0));
EXPECT_TRUE(filter.frozen(1));
EXPECT_TRUE(filter.frozen(2));
@@ -589,16 +650,19 @@
// Test that fully frozen doesn't apply when there is 1 time and we are before
// the start on the remote node.
{
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
- filter.FreezeUntilRemote(ta + oa - chrono::microseconds(1));
+    // Freeze based on the remote time (replaces the old FreezeUntilRemote).
+ filter.FreezeUntil({0, monotonic_clock::min_time},
+ ta + oa.duration - chrono::microseconds(1));
EXPECT_TRUE(filter.frozen(0));
filter.Sample(tb, ob);
EXPECT_FALSE(filter.frozen(1));
- filter.FreezeUntilRemote(ta + oa + chrono::microseconds(1));
+ filter.FreezeUntil({0, monotonic_clock::min_time},
+ ta + oa.duration + chrono::microseconds(1));
EXPECT_TRUE(filter.frozen(1));
}
@@ -607,42 +671,50 @@
// Tests that we refuse to modify frozen points in a bunch of different ways.
TEST_F(NoncausalTimestampFilterDeathTest, FrozenTimestamps) {
// Start with the simple case. A valid point in the middle.
- const monotonic_clock::time_point ta(chrono::milliseconds(0));
- const chrono::nanoseconds oa(chrono::microseconds(100));
+ const BootTimestamp ta{0,
+ monotonic_clock::time_point(chrono::milliseconds(0))};
+ const BootDuration oa{0, chrono::microseconds(100)};
- const monotonic_clock::time_point tb(chrono::milliseconds(100));
- const chrono::nanoseconds ob(chrono::microseconds(0));
+ const BootTimestamp tb{
+ 0, monotonic_clock::time_point(chrono::milliseconds(100))};
+ const BootDuration ob{0, chrono::microseconds(0)};
- const monotonic_clock::time_point tc(chrono::milliseconds(200));
- const chrono::nanoseconds oc(chrono::microseconds(100));
+ const BootTimestamp tc{
+ 0, monotonic_clock::time_point(chrono::milliseconds(200))};
+ const BootDuration oc{0, chrono::microseconds(100)};
{
// Test that adding before a frozen sample explodes.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
ASSERT_EQ(filter.timestamps_size(), 2u);
- filter.FreezeUntil(tb);
+ filter.FreezeUntil(tb, {0, monotonic_clock::min_time});
- EXPECT_DEATH({ filter.Sample(tb, oa); },
- "monotonic_now > frozen_time_ \\(0.100000000sec vs. "
- "0.100000000sec\\) : test_a -> test_b Tried to insert "
- "0.100000000sec before the frozen time of 0.100000000sec. "
- "Increase --time_estimation_buffer_seconds to greater than 0");
+ EXPECT_DEATH(
+ {
+ filter.Sample(tb, oa);
+ },
+ "monotonic_now > frozen_time_ \\(0.100000000sec vs. "
+ "0.100000000sec\\) : test_a -> test_b Tried to insert "
+ "0.100000000sec before the frozen time of 0.100000000sec. "
+ "Increase --time_estimation_buffer_seconds to greater than 0");
}
{
// Test that if we freeze it all after the end, we refuse any new samples.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
ASSERT_EQ(filter.timestamps_size(), 2u);
- filter.FreezeUntil(tc);
+ filter.FreezeUntil(tc, {0, monotonic_clock::min_time});
EXPECT_DEATH(
- { filter.Sample(tc, oc); },
+ {
+ filter.Sample(tc, oc);
+ },
"test_a -> test_b Returned a horizontal line previously and then got a "
"new sample at "
"0.200000000sec, 0.2 seconds after the last sample at 0.000000000sec");
@@ -650,30 +722,33 @@
{
// Test that freezing through the last point refuses a new sample in the middle.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tc, oc);
ASSERT_EQ(filter.timestamps_size(), 2u);
- filter.FreezeUntil(tc);
+ filter.FreezeUntil(tc, {0, monotonic_clock::min_time});
- EXPECT_DEATH({ filter.Sample(tb, ob); },
- "monotonic_now > frozen_time_ \\(0.100000000sec vs. "
- "0.200000000sec\\) : test_a -> test_b Tried to insert "
- "0.100000000sec before the frozen time of 0.200000000sec. "
- "Increase --time_estimation_buffer_seconds to greater than 0.1");
+ EXPECT_DEATH(
+ {
+ filter.Sample(tb, ob);
+ },
+ "monotonic_now > frozen_time_ \\(0.100000000sec vs. "
+ "0.200000000sec\\) : test_a -> test_b Tried to insert "
+ "0.100000000sec before the frozen time of 0.200000000sec. "
+ "Increase --time_estimation_buffer_seconds to greater than 0.1");
}
{
// Test that if we freeze, and a point in the middle triggers back
// propagation, we refuse.
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(ta, oa);
filter.Sample(tb, ob);
filter.Sample(tc, oc);
ASSERT_EQ(filter.timestamps_size(), 3u);
- filter.FreezeUntil(tb);
+ filter.FreezeUntil(tb, {0, monotonic_clock::min_time});
EXPECT_DEATH({ filter.Sample(tb, oa); },
"monotonic_now > frozen_time_ \\(0.100000000sec vs. "
@@ -845,69 +920,69 @@
// Tests that FindTimestamps finds timestamps in a sequence.
TEST_F(NoncausalTimestampFilterTest, FindTimestamps) {
- const monotonic_clock::time_point e = monotonic_clock::epoch();
+ const BootTimestamp e{0, monotonic_clock::epoch()};
// Note: t1, t2, t3 need to be picked such that the slope is small so filter
// doesn't modify the timestamps.
- const monotonic_clock::time_point t1 = e + chrono::nanoseconds(0);
- const chrono::nanoseconds o1 = chrono::nanoseconds(100);
- const monotonic_clock::time_point t2 = e + chrono::microseconds(1000);
- const chrono::nanoseconds o2 = chrono::nanoseconds(150);
- const monotonic_clock::time_point t3 = e + chrono::microseconds(2000);
- const chrono::nanoseconds o3 = chrono::nanoseconds(50);
+ const BootTimestamp t1 = e + chrono::nanoseconds(0);
+ const BootDuration o1{0, chrono::nanoseconds(100)};
+ const BootTimestamp t2 = e + chrono::microseconds(1000);
+ const BootDuration o2{0, chrono::nanoseconds(150)};
+ const BootTimestamp t3 = e + chrono::microseconds(2000);
+ const BootDuration o3{0, chrono::nanoseconds(50)};
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(t1, o1);
filter.Sample(t2, o2);
filter.Sample(t3, o3);
// Try points before, after, and at each of the points in the line.
- EXPECT_THAT(filter.FindTimestamps(e - chrono::microseconds(10)),
+ EXPECT_THAT(filter.FindTimestamps(e - chrono::microseconds(10), 0),
::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
::testing::Eq(std::make_tuple(t2, o2))));
- EXPECT_THAT(filter.FindTimestamps(e - chrono::microseconds(10), 0.9),
+ EXPECT_THAT(filter.FindTimestamps(e - chrono::microseconds(10), 0.9, 0),
::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
::testing::Eq(std::make_tuple(t2, o2))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(0)),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(0), 0),
::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
::testing::Eq(std::make_tuple(t2, o2))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(0), 0.8),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(0), 0.8, 0),
::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
::testing::Eq(std::make_tuple(t2, o2))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(100)),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(100), 0),
::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
::testing::Eq(std::make_tuple(t2, o2))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(100), 0.7),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(100), 0.7, 0),
::testing::Pair(::testing::Eq(std::make_tuple(t1, o1)),
::testing::Eq(std::make_tuple(t2, o2))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1000)),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1000), 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1000), 0.0),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1000), 0.0, 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1500)),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1500), 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1500), 0.0),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(1500), 0.0, 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2000)),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2000), 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2000), 0.1),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2000), 0.1, 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2500)),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2500), 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
- EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2500), 0.0),
+ EXPECT_THAT(filter.FindTimestamps(e + chrono::microseconds(2500), 0.0, 0),
::testing::Pair(::testing::Eq(std::make_tuple(t2, o2)),
::testing::Eq(std::make_tuple(t3, o3))));
}
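
The trailing 0 added to every FindTimestamps call above is the boot index to search; the expectations themselves are unchanged. A one-line sketch reusing the filter and points from this test, assuming the return value is the pair of (time, offset) tuples the matchers above treat it as:

  EXPECT_EQ(filter.FindTimestamps(e + chrono::microseconds(100), /*boot=*/0).first,
            std::make_tuple(t1, o1));
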
@@ -915,73 +990,73 @@
// Tests that Offset returns results indicative of it calling InterpolateOffset
// and FindTimestamps correctly.
TEST_F(NoncausalTimestampFilterTest, Offset) {
- const monotonic_clock::time_point e = monotonic_clock::epoch();
+ const BootTimestamp e{0, monotonic_clock::epoch()};
// Note: t1, t2, t3 need to be picked such that the slope is small so filter
// doesn't modify the timestamps.
- const monotonic_clock::time_point t1 = e + chrono::nanoseconds(1000);
- const chrono::nanoseconds o1 = chrono::nanoseconds(100);
- const double o1d = static_cast<double>(o1.count());
+ const BootTimestamp t1 = e + chrono::nanoseconds(1000);
+ const BootDuration o1{0, chrono::nanoseconds(100)};
+ const double o1d = static_cast<double>(o1.duration.count());
- const monotonic_clock::time_point t2 = e + chrono::microseconds(2000);
- const chrono::nanoseconds o2 = chrono::nanoseconds(150);
- const double o2d = static_cast<double>(o2.count());
+ const BootTimestamp t2 = e + chrono::microseconds(2000);
+ const BootDuration o2{0, chrono::nanoseconds(150)};
+ const double o2d = static_cast<double>(o2.duration.count());
- const monotonic_clock::time_point t3 = e + chrono::microseconds(3000);
- const chrono::nanoseconds o3 = chrono::nanoseconds(50);
- const double o3d = static_cast<double>(o3.count());
+ const BootTimestamp t3 = e + chrono::microseconds(3000);
+ const BootDuration o3{0, chrono::nanoseconds(50)};
+ const double o3d = static_cast<double>(o3.duration.count());
- const monotonic_clock::time_point t4 = e + chrono::microseconds(4000);
+ const BootTimestamp t4 = e + chrono::microseconds(4000);
- NoncausalTimestampFilter filter(node_a, node_b);
+ TestingNoncausalTimestampFilter filter(node_a, node_b);
filter.Sample(t1, o1);
// 1 point is handled properly.
- EXPECT_EQ(filter.Offset(t1), o1);
- EXPECT_EQ(filter.Offset(t1, 0.0), std::make_pair(o1, 0.0));
+ EXPECT_EQ(filter.Offset(t1, 0), o1);
+ EXPECT_EQ(filter.Offset(t1, 0.0, 0), std::make_pair(o1, 0.0));
// Check if we ask for something away from point that we get an offset
// based on the MaxVelocity allowed
- const double offset_pre = -(t1 - e).count() * kMaxVelocity();
- EXPECT_EQ(filter.Offset(e),
+ const double offset_pre = -(t1.time - e.time).count() * kMaxVelocity();
+ EXPECT_EQ(filter.Offset(e, 0),
o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)));
- EXPECT_EQ(filter.Offset(e, 0.0), std::make_pair(o1, offset_pre));
+ EXPECT_EQ(filter.Offset(e, 0.0, 0), std::make_pair(o1, offset_pre));
- double offset_post = -(t2 - t1).count() * kMaxVelocity();
- EXPECT_EQ(filter.Offset(t2),
+ double offset_post = -(t2.time - t1.time).count() * kMaxVelocity();
+ EXPECT_EQ(filter.Offset(t2, 0),
o1 + chrono::nanoseconds(static_cast<int64_t>(offset_post)));
- EXPECT_EQ(filter.Offset(t2, 0.0), std::make_pair(o1, offset_post));
+ EXPECT_EQ(filter.Offset(t2, 0.0, 0), std::make_pair(o1, offset_post));
filter.Sample(t2, o2);
filter.Sample(t3, o3);
- EXPECT_EQ(filter.Offset(t1), o1);
- EXPECT_EQ(filter.Offset(t2), o2);
- EXPECT_EQ(filter.Offset(t3), o3);
+ EXPECT_EQ(filter.Offset(t1, 0), o1);
+ EXPECT_EQ(filter.Offset(t2, 0), o2);
+ EXPECT_EQ(filter.Offset(t3, 0), o3);
- EXPECT_EQ(filter.Offset(t1, 0.0), std::make_pair(o1, 0.0));
+ EXPECT_EQ(filter.Offset(t1, 0.0, 0), std::make_pair(o1, 0.0));
EXPECT_EQ(filter.Offset(
- e + (t2.time_since_epoch() + t1.time_since_epoch()) / 2, 0.0),
+ e + (t2.time_since_epoch() + t1.time_since_epoch()) / 2, 0.0, 0),
std::make_pair(o1, (o2d - o1d) / 2.));
- EXPECT_EQ(filter.Offset(t2, 0.0), std::make_pair(o2, 0.0));
+ EXPECT_EQ(filter.Offset(t2, 0.0, 0), std::make_pair(o2, 0.0));
- EXPECT_EQ(filter.Offset(
- e + (t2.time_since_epoch() + t3.time_since_epoch()) / 2, 0.),
- std::make_pair(o2, (o2d + o3d) / 2. - o2d));
+ EXPECT_EQ(
+ filter.Offset(e + (t2.time_since_epoch() + t3.time_since_epoch()) / 2,
+ 0.0, 0),
+ std::make_pair(o2, (o2d + o3d) / 2. - o2d));
- EXPECT_EQ(filter.Offset(t3, 0.0), std::make_pair(o3, 0.0));
+ EXPECT_EQ(filter.Offset(t3, 0.0, 0), std::make_pair(o3, 0.0));
// Check that we still get same answer for times before our sample data...
- EXPECT_EQ(filter.Offset(e),
+ EXPECT_EQ(filter.Offset(e, 0),
o1 + chrono::nanoseconds(static_cast<int64_t>(offset_pre)));
- EXPECT_EQ(filter.Offset(e, 0.0), std::make_pair(o1, offset_pre));
+ EXPECT_EQ(filter.Offset(e, 0.0, 0), std::make_pair(o1, offset_pre));
// ... and after
- offset_post = -(t4 - t3).count() * kMaxVelocity();
- EXPECT_EQ(
- filter.Offset(t4).count(),
- (o3 + chrono::nanoseconds(static_cast<int64_t>(offset_post))).count());
- EXPECT_EQ(filter.Offset(t4, 0.0), std::make_pair(o3, offset_post));
+ offset_post = -(t4.time - t3.time).count() * kMaxVelocity();
+ EXPECT_EQ(filter.Offset(t4, 0),
+ (o3 + chrono::nanoseconds(static_cast<int64_t>(offset_post))));
+ EXPECT_EQ(filter.Offset(t4, 0.0, 0), std::make_pair(o3, offset_post));
}
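
Offset picked up the same boot argument and now returns a BootDuration, so extrapolation past the sampled range folds the extra nanoseconds back in through BootDuration's operator+. Two representative checks reusing t1/o1 and t3/t4/o3 from the test above ("slack" is just an illustrative local name):

  EXPECT_EQ(filter.Offset(t1, /*boot=*/0), o1);                         // exact sampled point
  const double slack = -(t4.time - t3.time).count() * kMaxVelocity();   // extrapolated past t3
  EXPECT_EQ(filter.Offset(t4, /*boot=*/0),
            o3 + chrono::nanoseconds(static_cast<int64_t>(slack)));
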
// Run a couple of points through the estimator and confirm it works.
@@ -994,43 +1069,45 @@
const Node *node_a = &node_a_buffer.message();
const Node *node_b = &node_b_buffer.message();
- const monotonic_clock::time_point ta1(chrono::milliseconds(1000));
- const monotonic_clock::time_point ta2 = ta1 + chrono::milliseconds(10);
- const monotonic_clock::time_point ta3 = ta1 + chrono::milliseconds(20);
+ const BootTimestamp ta1{
+ 0, monotonic_clock::time_point(chrono::milliseconds(1000))};
+ const BootTimestamp ta2 = ta1 + chrono::milliseconds(10);
+ const BootTimestamp ta3 = ta1 + chrono::milliseconds(20);
- const monotonic_clock::time_point tb1(chrono::milliseconds(4000));
- const monotonic_clock::time_point tb2 =
+ const BootTimestamp tb1{
+ 0, monotonic_clock::time_point(chrono::milliseconds(4000))};
+ const BootTimestamp tb2 =
tb1 + chrono::milliseconds(10) + chrono::nanoseconds(100);
- const monotonic_clock::time_point tb3 = tb1 + chrono::milliseconds(20);
+ const BootTimestamp tb3 = tb1 + chrono::milliseconds(20);
NoncausalOffsetEstimator estimator(node_a, node_b);
// Add 3 timestamps in and confirm that the slopes come out reasonably.
estimator.Sample(node_a, ta1, tb1);
estimator.Sample(node_b, tb1, ta1);
- EXPECT_EQ(estimator.a_timestamps_size(), 1u);
- EXPECT_EQ(estimator.b_timestamps_size(), 1u);
+ EXPECT_EQ(estimator.GetFilter(node_a)->timestamps_size(), 1u);
+ EXPECT_EQ(estimator.GetFilter(node_b)->timestamps_size(), 1u);
estimator.Sample(node_a, ta2, tb2);
estimator.Sample(node_b, tb2, ta2);
- EXPECT_EQ(estimator.a_timestamps_size(), 2u);
- EXPECT_EQ(estimator.b_timestamps_size(), 2u);
+ EXPECT_EQ(estimator.GetFilter(node_a)->timestamps_size(), 2u);
+ EXPECT_EQ(estimator.GetFilter(node_b)->timestamps_size(), 2u);
estimator.ReverseSample(node_b, tb3, ta3);
estimator.ReverseSample(node_a, ta3, tb3);
- EXPECT_EQ(estimator.a_timestamps_size(), 3u);
- EXPECT_EQ(estimator.b_timestamps_size(), 3u);
+ EXPECT_EQ(estimator.GetFilter(node_a)->timestamps_size(), 3u);
+ EXPECT_EQ(estimator.GetFilter(node_b)->timestamps_size(), 3u);
estimator.Pop(node_a, ta2);
estimator.Pop(node_b, tb2);
- EXPECT_EQ(estimator.a_timestamps_size(), 2u);
- EXPECT_EQ(estimator.b_timestamps_size(), 2u);
+ EXPECT_EQ(estimator.GetFilter(node_a)->timestamps_size(), 2u);
+ EXPECT_EQ(estimator.GetFilter(node_b)->timestamps_size(), 2u);
// And dropping down to 1 point means 0 slope.
estimator.Pop(node_a, ta3);
estimator.Pop(node_b, tb3);
- EXPECT_EQ(estimator.a_timestamps_size(), 1u);
- EXPECT_EQ(estimator.b_timestamps_size(), 1u);
+ EXPECT_EQ(estimator.GetFilter(node_a)->timestamps_size(), 1u);
+ EXPECT_EQ(estimator.GetFilter(node_b)->timestamps_size(), 1u);
}
} // namespace testing
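
With the a_/b_ accessors removed from NoncausalOffsetEstimator, the test now reaches each direction's filter through GetFilter(node). Which direction a node maps to is inferred from the usage above rather than from a declaration in this diff, so treat this as illustrative:

  // Old: estimator.a_timestamps_size() / estimator.b_timestamps_size()
  // New: query the per-node filter directly.
  EXPECT_EQ(estimator.GetFilter(node_a)->timestamps_size(), 1u);
  EXPECT_EQ(estimator.GetFilter(node_b)->timestamps_size(), 1u);
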
diff --git a/y2020/control_loops/drivetrain/localizer_test.cc b/y2020/control_loops/drivetrain/localizer_test.cc
index f3c3ebf..a1b6121 100644
--- a/y2020/control_loops/drivetrain/localizer_test.cc
+++ b/y2020/control_loops/drivetrain/localizer_test.cc
@@ -24,6 +24,7 @@
namespace drivetrain {
namespace testing {
+using aos::logger::BootTimestamp;
using frc971::control_loops::drivetrain::DrivetrainConfig;
using frc971::control_loops::drivetrain::Goal;
using frc971::control_loops::drivetrain::LocalizerControl;
@@ -133,13 +134,13 @@
event_loop_factory()->SetTimeConverter(&time_converter_);
CHECK_EQ(aos::configuration::GetNodeIndex(configuration(), roborio_), 6);
CHECK_EQ(aos::configuration::GetNodeIndex(configuration(), pi1_), 1);
- time_converter_.AddMonotonic({monotonic_clock::epoch() + kPiTimeOffset,
- monotonic_clock::epoch() + kPiTimeOffset,
- monotonic_clock::epoch() + kPiTimeOffset,
- monotonic_clock::epoch() + kPiTimeOffset,
- monotonic_clock::epoch() + kPiTimeOffset,
- monotonic_clock::epoch() + kPiTimeOffset,
- monotonic_clock::epoch()});
+ time_converter_.AddMonotonic({BootTimestamp::epoch() + kPiTimeOffset,
+ BootTimestamp::epoch() + kPiTimeOffset,
+ BootTimestamp::epoch() + kPiTimeOffset,
+ BootTimestamp::epoch() + kPiTimeOffset,
+ BootTimestamp::epoch() + kPiTimeOffset,
+ BootTimestamp::epoch() + kPiTimeOffset,
+ BootTimestamp::epoch()});
set_team_id(frc971::control_loops::testing::kTeamNumber);
set_battery_voltage(12.0);
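
End-of-diff note: the time converter is now seeded with BootTimestamp values, so each node's starting clock is expressed as boot 0 plus its previous offset. An equivalent sketch of the change above, with pi_start/roborio_start as illustrative local names (kPiTimeOffset and time_converter_ already exist in this test):

  const BootTimestamp pi_start = BootTimestamp::epoch() + kPiTimeOffset;
  const BootTimestamp roborio_start = BootTimestamp::epoch();
  // Six pis start at the offset clock, the roborio at the epoch, all on boot 0.
  time_converter_.AddMonotonic({pi_start, pi_start, pi_start, pi_start,
                                pi_start, pi_start, roborio_start});
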