#include "aos/util/mcap_logger.h"

#include <algorithm>
#include <numeric>
#include <set>

#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "aos/configuration_schema.h"
#include "aos/flatbuffer_merge.h"
#include "lz4/lz4.h"
#include "lz4/lz4frame.h"
#include "single_include/nlohmann/json.hpp"

DEFINE_uint64(mcap_chunk_size, 10'000'000,
              "Size, in bytes, of individual MCAP chunks");
DEFINE_bool(fetch, false,
            "Whether to fetch most recent messages at start of logfile. Turn "
            "this on if there are, e.g., one-time messages sent before the "
            "start of the logfile that you need access to. Turn it off if you "
            "don't want to deal with having messages that have timestamps "
            "that may be arbitrarily far before any other interesting "
            "messages.");

namespace aos {

nlohmann::json JsonSchemaForFlatbuffer(const FlatbufferType &type,
                                       JsonSchemaRecursion recursion_level) {
  nlohmann::json schema;
  if (recursion_level == JsonSchemaRecursion::kTopLevel) {
    schema["$schema"] = "https://json-schema.org/draft/2020-12/schema";
  }
  schema["type"] = "object";
  nlohmann::json properties;
  for (int index = 0; index < type.NumberFields(); ++index) {
    nlohmann::json field;
    const bool is_array = type.FieldIsRepeating(index);
    if (type.FieldIsSequence(index)) {
      // For sub-tables/structs, just recurse.
      nlohmann::json subtype = JsonSchemaForFlatbuffer(
          type.FieldType(index), JsonSchemaRecursion::kNested);
      if (is_array) {
        field["type"] = "array";
        field["items"] = subtype;
      } else {
        field = subtype;
      }
    } else {
      std::string elementary_type;
      switch (type.FieldElementaryType(index)) {
        case flatbuffers::ET_UTYPE:
        case flatbuffers::ET_CHAR:
        case flatbuffers::ET_UCHAR:
        case flatbuffers::ET_SHORT:
        case flatbuffers::ET_USHORT:
        case flatbuffers::ET_INT:
        case flatbuffers::ET_UINT:
        case flatbuffers::ET_LONG:
        case flatbuffers::ET_ULONG:
        case flatbuffers::ET_FLOAT:
        case flatbuffers::ET_DOUBLE:
          elementary_type = "number";
          break;
        case flatbuffers::ET_BOOL:
          elementary_type = "boolean";
          break;
        case flatbuffers::ET_STRING:
          elementary_type = "string";
          break;
        case flatbuffers::ET_SEQUENCE:
          if (type.FieldIsEnum(index)) {
            elementary_type = "string";
          } else {
            LOG(FATAL) << "Should not encounter any sequence fields here.";
          }
          break;
      }
      if (is_array) {
        field["type"] = "array";
        field["items"]["type"] = elementary_type;
      } else {
        field["type"] = elementary_type;
      }
    }
    // the nlohmann::json [] operator needs an actual string, not just a
    // string_view :(.
    properties[std::string(type.FieldName(index))] = field;
  }
  schema["properties"] = properties;
  return schema;
}
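// As a rough illustration of what JsonSchemaForFlatbuffer() produces
// (assuming a hypothetical flatbuffer table `Pose` with `double x; double y;`
// fields), the top-level schema would look like:
//
//   {
//     "$schema": "https://json-schema.org/draft/2020-12/schema",
//     "type": "object",
//     "properties": {
//       "x": {"type": "number"},
//       "y": {"type": "number"}
//     }
//   }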

namespace {
std::string_view CompressionName(McapLogger::Compression compression) {
  switch (compression) {
    case McapLogger::Compression::kNone:
      return "";
    case McapLogger::Compression::kLz4:
      return "lz4";
  }
  LOG(FATAL) << "Unreachable.";
}
}  // namespace

McapLogger::McapLogger(EventLoop *event_loop, const std::string &output_path,
                       Serialization serialization,
                       CanonicalChannelNames canonical_channels,
                       Compression compression)
    : event_loop_(event_loop),
      output_(output_path),
      serialization_(serialization),
      canonical_channels_(canonical_channels),
      compression_(compression),
      configuration_channel_([]() {
        // Setup a fake Channel for providing the configuration in the MCAP
        // file. This is included for convenience so that consumers of the MCAP
        // file can actually dereference things like the channel indices in AOS
        // timing reports.
        flatbuffers::FlatBufferBuilder fbb;
        flatbuffers::Offset<flatbuffers::String> name_offset =
            fbb.CreateString("");
        flatbuffers::Offset<flatbuffers::String> type_offset =
            fbb.CreateString("aos.Configuration");
        flatbuffers::Offset<reflection::Schema> schema_offset =
            aos::CopyFlatBuffer(
                aos::FlatbufferSpan<reflection::Schema>(ConfigurationSchema()),
                &fbb);
        Channel::Builder channel(fbb);
        channel.add_name(name_offset);
        channel.add_type(type_offset);
        channel.add_schema(schema_offset);
        fbb.Finish(channel.Finish());
        return fbb.Release();
      }()),
      configuration_(CopyFlatBuffer(event_loop_->configuration())) {
  event_loop->SkipTimingReport();
  event_loop->SkipAosLog();
  CHECK(output_);
  WriteMagic();
  WriteHeader();
  // Schemas and channels get written out both at the start and end of the
  // file, per the MCAP spec.
  WriteSchemasAndChannels(RegisterHandlers::kYes);
}

McapLogger::~McapLogger() {
  // If we have any data remaining, write one last chunk.
  for (auto &pair : current_chunks_) {
    if (pair.second.data.tellp() > 0) {
      WriteChunk(&pair.second);
    }
  }
  WriteDataEnd();

  // Now we enter the Summary section, where we write out all the channel/index
  // information that readers need to be able to seek to arbitrary locations
  // within the log.
  const uint64_t summary_offset = output_.tellp();
  const SummaryOffset chunk_indices_offset = WriteChunkIndices();
  const SummaryOffset stats_offset = WriteStatistics();
  // Schemas/Channels need to get reproduced in the summary section for random
  // access reading.
  const std::vector<SummaryOffset> offsets =
      WriteSchemasAndChannels(RegisterHandlers::kNo);

  // Next we have the summary offset section, which references the individual
  // pieces of the summary section.
  const uint64_t summary_offset_offset = output_.tellp();

  // SummaryOffsets must all be the final thing before the footer.
  WriteSummaryOffset(chunk_indices_offset);
  WriteSummaryOffset(stats_offset);
  for (const auto &offset : offsets) {
    WriteSummaryOffset(offset);
  }

  // And finally, the footer which must itself reference the start of the
  // summary and summary offset sections.
  WriteFooter(summary_offset, summary_offset_offset);
  WriteMagic();

  // Note: LZ4 chunk compression (Compression::kLz4) is worth enabling here;
  // with flatbuffers messages that contain large numbers of zeros (e.g., large
  // grids or thresholded images) it can result in massive savings.
  if (VLOG_IS_ON(2)) {
    // For debugging, print out how much space each channel is taking in the
    // overall log.
    LOG(INFO) << total_message_bytes_;
    std::vector<std::pair<size_t, const Channel *>> channel_bytes;
    for (const auto &pair : total_channel_bytes_) {
      channel_bytes.push_back(std::make_pair(pair.second, pair.first));
    }
    std::sort(channel_bytes.begin(), channel_bytes.end());
    for (const auto &pair : channel_bytes) {
      LOG(INFO) << configuration::StrippedChannelToString(pair.second) << ": "
                << static_cast<float>(pair.first) * 1e-6 << "MB "
                << static_cast<float>(pair.first) / total_message_bytes_
                << "\n";
    }
  }
}

std::vector<McapLogger::SummaryOffset> McapLogger::WriteSchemasAndChannels(
    RegisterHandlers register_handlers) {
  uint16_t id = 0;
  std::map<uint16_t, const Channel *> channels;
  for (const Channel *channel : *event_loop_->configuration()->channels()) {
    ++id;
    if (!configuration::ChannelIsReadableOnNode(channel, event_loop_->node())) {
      continue;
    }
    channels[id] = channel;

    if (register_handlers == RegisterHandlers::kYes) {
      message_counts_[id] = 0;
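      // Register a watcher that appends each new message on this channel to
      // the channel's in-progress chunk and flushes the chunk to disk once it
      // exceeds --mcap_chunk_size bytes.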
      event_loop_->MakeRawWatcher(
          channel, [this, id, channel](const Context &context, const void *) {
            ChunkStatus *chunk = &current_chunks_[id];
            WriteMessage(id, channel, context, chunk);
            if (static_cast<uint64_t>(chunk->data.tellp()) >
                FLAGS_mcap_chunk_size) {
              WriteChunk(chunk);
            }
          });
      fetchers_[id] = event_loop_->MakeRawFetcher(channel);
      event_loop_->OnRun([this, id, channel]() {
        if (FLAGS_fetch && fetchers_[id]->Fetch()) {
          WriteMessage(id, channel, fetchers_[id]->context(),
                       &current_chunks_[id]);
        }
      });
    }
  }

  // Manually add in a special /configuration channel.
  if (register_handlers == RegisterHandlers::kYes) {
    configuration_id_ = ++id;
    event_loop_->OnRun([this]() {
      // TODO(james): Make it so that the timestamp for the configuration
      // message is not 0.0.
      Context config_context;
      config_context.monotonic_event_time = event_loop_->monotonic_now();
      config_context.queue_index = 0;
      config_context.size = configuration_.span().size();
      config_context.data = configuration_.span().data();
      WriteMessage(configuration_id_, &configuration_channel_.message(),
                   config_context, &current_chunks_[configuration_id_]);
    });
  }

  std::vector<SummaryOffset> offsets;

  const uint64_t schema_offset = output_.tellp();

  for (const auto &pair : channels) {
    WriteSchema(pair.first, pair.second);
  }

  WriteSchema(configuration_id_, &configuration_channel_.message());

  const uint64_t channel_offset = output_.tellp();

  offsets.push_back(
      {OpCode::kSchema, schema_offset, channel_offset - schema_offset});

  for (const auto &pair : channels) {
    // Write out the channel entry that uses the schema (we just re-use
    // the schema ID for the channel ID, since we aren't deduplicating
    // schemas for channels that are of the same type).
    WriteChannel(pair.first, pair.first, pair.second);
  }

  // Provide the configuration message on a special channel that is just named
  // "configuration", which is guaranteed not to conflict with any existing
  // channel under our current naming scheme (since our current scheme will,
  // at a minimum, put a space between the name/type of a channel).
  WriteChannel(configuration_id_, configuration_id_,
               &configuration_channel_.message(), "configuration");

  offsets.push_back({OpCode::kChannel, channel_offset,
                     static_cast<uint64_t>(output_.tellp()) - channel_offset});
  return offsets;
}

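// Every MCAP file must begin and end with the 8-byte magic bytes
// 0x89 'M' 'C' 'A' 'P' '0' '\r' '\n'.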
void McapLogger::WriteMagic() { output_ << "\x89MCAP0\r\n"; }

void McapLogger::WriteHeader() {
  string_builder_.Reset();
  // "profile"
  AppendString(&string_builder_, "x-aos");
  // "library"
  AppendString(&string_builder_, "AOS MCAP converter");
  WriteRecord(OpCode::kHeader, string_builder_.Result());
}

void McapLogger::WriteFooter(uint64_t summary_offset,
                             uint64_t summary_offset_offset) {
  string_builder_.Reset();
  AppendInt64(&string_builder_, summary_offset);
  AppendInt64(&string_builder_, summary_offset_offset);
  // CRC32 for the Summary section, which we don't bother populating.
  AppendInt32(&string_builder_, 0);
  WriteRecord(OpCode::kFooter, string_builder_.Result());
}

void McapLogger::WriteDataEnd() {
  string_builder_.Reset();
  // CRC32 for the data, which we are too lazy to calculate.
  AppendInt32(&string_builder_, 0);
  WriteRecord(OpCode::kDataEnd, string_builder_.Result());
}

void McapLogger::WriteSchema(const uint16_t id, const aos::Channel *channel) {
  CHECK(channel->has_schema());

  const FlatbufferDetachedBuffer<reflection::Schema> schema =
      CopyFlatBuffer(channel->schema());

  // Write out the schema (we don't bother deduplicating schema types):
  string_builder_.Reset();
  // Schema ID
  AppendInt16(&string_builder_, id);
  // Type name
  AppendString(&string_builder_, channel->type()->string_view());
  switch (serialization_) {
    case Serialization::kJson:
      // Encoding
      AppendString(&string_builder_, "jsonschema");
      // Actual schema itself
      AppendString(&string_builder_,
                   JsonSchemaForFlatbuffer({channel->schema()}).dump());
      break;
    case Serialization::kFlatbuffer:
      // Encoding
      AppendString(&string_builder_, "flatbuffer");
      // Actual schema itself
      AppendString(&string_builder_,
                   {reinterpret_cast<const char *>(schema.span().data()),
                    schema.span().size()});
      break;
  }
  WriteRecord(OpCode::kSchema, string_builder_.Result());
}

void McapLogger::WriteChannel(const uint16_t id, const uint16_t schema_id,
                              const aos::Channel *channel,
                              std::string_view override_name) {
  string_builder_.Reset();
  // Channel ID
  AppendInt16(&string_builder_, id);
  // Schema ID
  AppendInt16(&string_builder_, schema_id);
  // Topic name
  std::string topic_name(override_name);
  if (topic_name.empty()) {
    switch (canonical_channels_) {
      case CanonicalChannelNames::kCanonical:
        topic_name = absl::StrCat(channel->name()->string_view(), " ",
                                  channel->type()->string_view());
        break;
      case CanonicalChannelNames::kShortened: {
        std::set<std::string> names = configuration::GetChannelAliases(
            event_loop_->configuration(), channel, event_loop_->name(),
            event_loop_->node());
        std::string_view shortest_name;
        for (const std::string &name : names) {
          if (shortest_name.empty() || name.size() < shortest_name.size()) {
            shortest_name = name;
          }
        }
        if (shortest_name != channel->name()->string_view()) {
          VLOG(1) << "Shortening " << channel->name()->string_view() << " "
                  << channel->type()->string_view() << " to " << shortest_name;
        }
        topic_name =
            absl::StrCat(shortest_name, " ", channel->type()->string_view());
        break;
      }
    }
  }
  AppendString(&string_builder_, topic_name);
  // Encoding
  switch (serialization_) {
    case Serialization::kJson:
      AppendString(&string_builder_, "json");
      break;
    case Serialization::kFlatbuffer:
      AppendString(&string_builder_, "flatbuffer");
      break;
  }

  // Metadata (technically supposed to be a Map<string, string>)
  AppendString(&string_builder_, "");
  WriteRecord(OpCode::kChannel, string_builder_.Result());
}

void McapLogger::WriteMessage(uint16_t channel_id, const Channel *channel,
                              const Context &context, ChunkStatus *chunk) {
  CHECK_NOTNULL(context.data);

  message_counts_[channel_id]++;

  if (!earliest_message_.has_value()) {
    earliest_message_ = context.monotonic_event_time;
  } else {
    earliest_message_ =
        std::min(context.monotonic_event_time, earliest_message_.value());
  }
  if (!chunk->earliest_message.has_value()) {
    chunk->earliest_message = context.monotonic_event_time;
  } else {
    chunk->earliest_message =
        std::min(context.monotonic_event_time, chunk->earliest_message.value());
  }
  chunk->latest_message = context.monotonic_event_time;
  latest_message_ = context.monotonic_event_time;

  string_builder_.Reset();
  // Channel ID
  AppendInt16(&string_builder_, channel_id);
  // Queue Index
  AppendInt32(&string_builder_, context.queue_index);
  // Log time, and publish time. Since we don't log a logged time, just use
  // published time.
  // TODO(james): If we use this for multi-node logfiles, use distributed clock.
  AppendInt64(&string_builder_,
              context.monotonic_event_time.time_since_epoch().count());
  // Note: Foxglove Studio doesn't appear to actually support using publish
  // time right now.
  AppendInt64(&string_builder_,
              context.monotonic_event_time.time_since_epoch().count());

  CHECK(flatbuffers::Verify(*channel->schema(),
                            *channel->schema()->root_table(),
                            static_cast<const uint8_t *>(context.data),
                            static_cast<size_t>(context.size)))
      << ": Corrupted flatbuffer on " << channel->name()->c_str() << " "
      << channel->type()->c_str();

  switch (serialization_) {
    case Serialization::kJson:
      aos::FlatbufferToJson(&string_builder_, channel->schema(),
                            static_cast<const uint8_t *>(context.data));
      break;
    case Serialization::kFlatbuffer:
      string_builder_.Append(
          {static_cast<const char *>(context.data), context.size});
      break;
  }
  total_message_bytes_ += context.size;
  total_channel_bytes_[channel] += context.size;

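  // Track (timestamp, offset-within-chunk) for this message so that
  // WriteChunk() can emit MessageIndex records for the chunk later.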
  chunk->message_indices[channel_id].push_back(
      std::make_pair<uint64_t, uint64_t>(
          context.monotonic_event_time.time_since_epoch().count(),
          chunk->data.tellp()));

  WriteRecord(OpCode::kMessage, string_builder_.Result(), &chunk->data);
}

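// Every MCAP record shares a common framing: a 1-byte opcode, an 8-byte
// length, and then `length` bytes of record content. Note that the length is
// written out in host byte order here, which assumes a little-endian target.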
void McapLogger::WriteRecord(OpCode op, std::string_view record,
                             std::ostream *ostream) {
  ostream->put(static_cast<char>(op));
  uint64_t record_length = record.size();
  ostream->write(reinterpret_cast<const char *>(&record_length),
                 sizeof(record_length));
  *ostream << record;
}

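// Chunk records bundle up a batch of Message records so that they can be
// compressed (and indexed) as a unit. The fields written below are the
// start/end message times, the uncompressed size, an uncompressed CRC (left
// at zero), the compression name, and the (optionally LZ4-compressed) records
// themselves. MessageIndex records for each channel in the chunk are then
// written immediately after the chunk itself.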
void McapLogger::WriteChunk(ChunkStatus *chunk) {
  string_builder_.Reset();

  CHECK(chunk->earliest_message.has_value());
  const uint64_t chunk_offset = output_.tellp();
  AppendInt64(&string_builder_,
              chunk->earliest_message->time_since_epoch().count());
  CHECK(chunk->latest_message.has_value());
  AppendInt64(&string_builder_,
              chunk->latest_message.value().time_since_epoch().count());

  std::string chunk_records = chunk->data.str();
  // Reset the chunk buffer.
  chunk->data.str("");

  const uint64_t records_size = chunk_records.size();
  // Uncompressed chunk size.
  AppendInt64(&string_builder_, records_size);
  // Uncompressed CRC (unpopulated).
  AppendInt32(&string_builder_, 0);
  // Compression
  AppendString(&string_builder_, CompressionName(compression_));
  uint64_t records_size_compressed = records_size;
  switch (compression_) {
    case Compression::kNone:
      AppendBytes(&string_builder_, chunk_records);
      break;
    case Compression::kLz4: {
      // Default preferences.
      LZ4F_preferences_t *lz4_preferences = nullptr;
      const uint64_t max_size =
          LZ4F_compressFrameBound(records_size, lz4_preferences);
      CHECK_NE(0u, max_size);
      if (max_size > compression_buffer_.size()) {
        compression_buffer_.resize(max_size);
      }
      records_size_compressed = LZ4F_compressFrame(
          compression_buffer_.data(), compression_buffer_.size(),
          reinterpret_cast<const char *>(chunk_records.data()),
          chunk_records.size(), lz4_preferences);
      CHECK(!LZ4F_isError(records_size_compressed));
      AppendBytes(&string_builder_,
                  {reinterpret_cast<const char *>(compression_buffer_.data()),
                   static_cast<size_t>(records_size_compressed)});
      break;
    }
  }
  WriteRecord(OpCode::kChunk, string_builder_.Result());

  std::map<uint16_t, uint64_t> index_offsets;
  const uint64_t message_index_start = output_.tellp();
  for (const auto &indices : chunk->message_indices) {
    index_offsets[indices.first] = output_.tellp();
    string_builder_.Reset();
    AppendInt16(&string_builder_, indices.first);
    AppendMessageIndices(&string_builder_, indices.second);
    WriteRecord(OpCode::kMessageIndex, string_builder_.Result());
  }
  chunk->message_indices.clear();
  chunk_indices_.push_back(ChunkIndex{
      .start_time = chunk->earliest_message.value(),
      .end_time = chunk->latest_message.value(),
      .offset = chunk_offset,
      .chunk_size = message_index_start - chunk_offset,
      .records_size = records_size,
      .records_size_compressed = records_size_compressed,
      .message_index_offsets = index_offsets,
      .message_index_size =
          static_cast<uint64_t>(output_.tellp()) - message_index_start,
      .compression = compression_});
  chunk->earliest_message.reset();
}

McapLogger::SummaryOffset McapLogger::WriteStatistics() {
  const uint64_t stats_offset = output_.tellp();
  const uint64_t message_count = std::accumulate(
      message_counts_.begin(), message_counts_.end(), uint64_t{0},
      [](const uint64_t &count, const std::pair<uint16_t, uint64_t> &val) {
        return count + val.second;
      });
  string_builder_.Reset();
  AppendInt64(&string_builder_, message_count);
  // Schema count.
  AppendInt16(&string_builder_, message_counts_.size());
  // Channel count.
  AppendInt32(&string_builder_, message_counts_.size());
  // Attachment count.
  AppendInt32(&string_builder_, 0);
  // Metadata count.
  AppendInt32(&string_builder_, 0);
  // Chunk count.
  AppendInt32(&string_builder_, chunk_indices_.size());
  // Earliest & latest message times.
  AppendInt64(&string_builder_, earliest_message_->time_since_epoch().count());
  AppendInt64(&string_builder_, latest_message_.time_since_epoch().count());
  // Per-channel message counts.
  AppendChannelMap(&string_builder_, message_counts_);
  WriteRecord(OpCode::kStatistics, string_builder_.Result());
  return {OpCode::kStatistics, stats_offset,
          static_cast<uint64_t>(output_.tellp()) - stats_offset};
}

McapLogger::SummaryOffset McapLogger::WriteChunkIndices() {
  const uint64_t index_offset = output_.tellp();
  for (const ChunkIndex &index : chunk_indices_) {
    string_builder_.Reset();
    AppendInt64(&string_builder_, index.start_time.time_since_epoch().count());
    AppendInt64(&string_builder_, index.end_time.time_since_epoch().count());
    AppendInt64(&string_builder_, index.offset);
    AppendInt64(&string_builder_, index.chunk_size);
    AppendChannelMap(&string_builder_, index.message_index_offsets);
    AppendInt64(&string_builder_, index.message_index_size);
    // Compression used.
    AppendString(&string_builder_, CompressionName(index.compression));
    // Compressed and uncompressed records size.
    AppendInt64(&string_builder_, index.records_size_compressed);
    AppendInt64(&string_builder_, index.records_size);
    WriteRecord(OpCode::kChunkIndex, string_builder_.Result());
  }
  return {OpCode::kChunkIndex, index_offset,
          static_cast<uint64_t>(output_.tellp()) - index_offset};
}

void McapLogger::WriteSummaryOffset(const SummaryOffset &offset) {
  string_builder_.Reset();
  string_builder_.AppendChar(static_cast<char>(offset.op_code));
  AppendInt64(&string_builder_, offset.offset);
  AppendInt64(&string_builder_, offset.size);
  WriteRecord(OpCode::kSummaryOffset, string_builder_.Result());
}

void McapLogger::AppendString(FastStringBuilder *builder,
                              std::string_view string) {
  AppendInt32(builder, string.size());
  builder->Append(string);
}

void McapLogger::AppendBytes(FastStringBuilder *builder,
                             std::string_view bytes) {
  AppendInt64(builder, bytes.size());
  builder->Append(bytes);
}

namespace {
template <typename T>
static void AppendInt(FastStringBuilder *builder, T val) {
  builder->Append(
      std::string_view(reinterpret_cast<const char *>(&val), sizeof(T)));
}
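// MCAP Map<K, V> (and repeated tuple) fields are serialized as a 4-byte
// byte-length prefix followed by the packed key/value pairs; this covers the
// fixed-width key/value types we use (channel-id -> count/offset maps and the
// (timestamp, offset) message-index entries).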
template <typename T>
void AppendMap(FastStringBuilder *builder, const T &map) {
  AppendInt<uint32_t>(
      builder, map.size() * (sizeof(typename T::value_type::first_type) +
                             sizeof(typename T::value_type::second_type)));
  for (const auto &pair : map) {
    AppendInt(builder, pair.first);
    AppendInt(builder, pair.second);
  }
}
}  // namespace

void McapLogger::AppendChannelMap(FastStringBuilder *builder,
                                  const std::map<uint16_t, uint64_t> &map) {
  AppendMap(builder, map);
}

void McapLogger::AppendMessageIndices(
    FastStringBuilder *builder,
    const std::vector<std::pair<uint64_t, uint64_t>> &messages) {
  AppendMap(builder, messages);
}

void McapLogger::AppendInt16(FastStringBuilder *builder, uint16_t val) {
  AppendInt(builder, val);
}

void McapLogger::AppendInt32(FastStringBuilder *builder, uint32_t val) {
  AppendInt(builder, val);
}

void McapLogger::AppendInt64(FastStringBuilder *builder, uint64_t val) {
  AppendInt(builder, val);
}
}  // namespace aos