// blob: 33702e08c061f3ec9aaa1a965c480341c3c2ef36
include "aos/configuration.fbs";
namespace aos.logger;
// A log file is a sequence of size prefixed flatbuffers.
// The first flatbuffer will be the LogFileHeader, followed by an arbitrary
// number of MessageHeaders.
//
// The log file starts at the time demarcated in the header on the monotonic
// clock. There will be any number of messages per channel logged before the
// start time. These messages are logged so that fetchers can retrieve the
// state of the system at the start of the logfile for things like capturing
// parameters. In replay, they should be made available to fetchers, but not
// trigger watchers.
table LogFileHeader {
  // Time this log file started on the monotonic clock in nanoseconds.
  // If this isn't known (the log file is being recorded from another node
  // where we don't know the time offset), both timestamps will be min_time
  // (int64 min, the default below).
  monotonic_start_time:int64 = -9223372036854775808 (id: 0);
  // Time this log file started on the realtime clock in nanoseconds.
  realtime_start_time:int64 = -9223372036854775808 (id: 1);

  // Messages are not written in order to disk. They will be out of order by
  // at most this duration (in nanoseconds). If the log reader buffers until
  // it finds messages this much newer than its simulation time, it will never
  // find a message out of order.
  max_out_of_order_duration:int64 (id: 2);

  // The configuration of the channels. It is valid to have a log file with
  // just this filled out. That is a config only file which will be pointed to
  // by files using configuration_sha256 and optionally configuration_path.
  configuration:aos.Configuration (id: 3);
  // sha256 of the configuration used. If this is set, configuration will not
  // be set.
  configuration_sha256:string (id: 16);

  // Name of the device which this log file is for.
  name:string (id: 4);

  // The current node, if known and running in a multi-node configuration.
  node:Node (id: 5);

  // All UUIDs are uuid4.
  //
  // A log file is made up of a bunch of log files and parts. These build up
  // a tree. Every .bfbs file has a LogFileHeader at the start.
  //
  //  /-- basename_pi1_data.part0.bfbs, basename_pi1_data.part1.bfbs, etc.
  // ---- basename_timestamps/pi1/aos/remote_timestamps/pi2/aos.logger.MessageHeader.part0.bfbs, etc.
  //  \-- basename_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0.bfbs, etc.
  //
  // All log files and parts from a single logging event will have
  // the same uuid. This should be all the files generated on a single node.
  // Used to correlate files recorded together.
  log_event_uuid:string (id: 6);

  // All log parts generated by a single Logger instance will have the same
  // value here.
  logger_instance_uuid:string (id: 10);

  // All log parts generated on a single node while it is booted will have the
  // same value here. It also matches with the one used by systemd.
  logger_node_boot_uuid:string (id: 11);

  // Boot UUID of the node the data came from. Empty if we didn't have one at
  // the time.
  source_node_boot_uuid:string (id: 13);

  // Timestamps that this logfile started at on the logger's clocks. This is
  // mostly useful when trying to deduce the order of node reboots. The int64
  // min default marks the value as unknown.
  logger_monotonic_start_time:int64 = -9223372036854775808 (id: 14);
  logger_realtime_start_time:int64 = -9223372036854775808 (id: 15);

  // All log events across all nodes produced by a single high-level start
  // event will have the same value here.
  log_start_uuid:string (id: 12);

  // Part files which go together all have headers. When creating a log file
  // with multiple parts, the logger should stop writing to part n-1 as soon
  // as it starts writing to part n, and write messages as though there was
  // just 1 big file. Therefore, part files won't be self standing, since
  // they won't have data fetched at the beginning.
  //
  // If data is logged before the time offset can be established to the other
  // node, the start time will be monotonic_clock::min_time, and a new part
  // file will be created when the start time is known.
  //
  // All the parts which go together have the same uuid.
  parts_uuid:string (id: 7);
  // And the parts_index corresponds to which part this is in the sequence.
  // The index should start at 0.
  parts_index:int32 (id: 8);

  // The node the data was logged on, if known and running in a multi-node
  // configuration.
  logger_node:Node (id: 9);
}
// Table holding a single logged message plus its timing/ordering metadata.
table MessageHeader {
  // Index into the channel datastructure in the log file header. This
  // provides the data type.
  channel_index:uint32 (id: 0);

  // Time this message was sent on the monotonic clock in nanoseconds on this
  // node.
  monotonic_sent_time:int64 (id: 1);
  // Time this message was sent on the realtime clock in nanoseconds on this
  // node.
  realtime_sent_time:int64 (id: 2);

  // Index into the ipc queue of this message. This should start with 0 and
  // always monotonically increment if no messages were ever lost. It will
  // wrap at a multiple of the queue size.
  queue_index:uint32 (id: 3);

  // TODO(austin): Format? Compressed?
  // The nested flatbuffer holding the message payload.
  data:[ubyte] (id: 4);

  // Time this message was sent on the monotonic clock of the remote node in
  // nanoseconds. The int64 min default marks the value as unknown.
  monotonic_remote_time:int64 = -9223372036854775808 (id: 5);
  // Time this message was sent on the realtime clock of the remote node in
  // nanoseconds. The int64 min default marks the value as unknown.
  realtime_remote_time:int64 = -9223372036854775808 (id: 6);

  // Queue index of this message on the remote node. The uint32 max default
  // marks the value as unknown.
  remote_queue_index:uint32 = 4294967295 (id: 7);
}

root_type MessageHeader;