#include "aos/util/mcap_logger.h"

#include "absl/strings/str_replace.h"
#include "aos/configuration_schema.h"
#include "aos/flatbuffer_merge.h"
#include "single_include/nlohmann/json.hpp"

DEFINE_uint64(mcap_chunk_size, 10'000'000,
              "Size, in bytes, of individual MCAP chunks");
DEFINE_bool(fetch, false,
            "Whether to fetch most recent messages at start of logfile. Turn "
            "this on if there are, e.g., one-time messages sent before the "
            "start of the logfile that you need access to. Turn it off if you "
            "don't want to deal with having messages that have timestamps that "
            "may be arbitrarily far before any other interesting messages.");

namespace aos {

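// Recursively builds a JSON schema (https://json-schema.org/) describing the
// given flatbuffer type: scalar fields map to "number"/"boolean"/"string",
// enums map to "string", vectors map to "array", and sub-tables/structs
// recurse. As a rough sketch, a table with a single "float x" field comes out
// approximately as:
//   {"$schema": "https://json-schema.org/draft/2020-12/schema",
//    "type": "object",
//    "properties": {"x": {"type": "number"}}}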
nlohmann::json JsonSchemaForFlatbuffer(const FlatbufferType &type,
                                       JsonSchemaRecursion recursion_level) {
  nlohmann::json schema;
  if (recursion_level == JsonSchemaRecursion::kTopLevel) {
    schema["$schema"] = "https://json-schema.org/draft/2020-12/schema";
  }
  schema["type"] = "object";
  nlohmann::json properties;
  for (int index = 0; index < type.NumberFields(); ++index) {
    nlohmann::json field;
    const bool is_array = type.FieldIsRepeating(index);
    if (type.FieldIsSequence(index)) {
      // For sub-tables/structs, just recurse.
      nlohmann::json subtype = JsonSchemaForFlatbuffer(
          type.FieldType(index), JsonSchemaRecursion::kNested);
      if (is_array) {
        field["type"] = "array";
        field["items"] = subtype;
      } else {
        field = subtype;
      }
    } else {
      std::string elementary_type;
      switch (type.FieldElementaryType(index)) {
        case flatbuffers::ET_UTYPE:
        case flatbuffers::ET_CHAR:
        case flatbuffers::ET_UCHAR:
        case flatbuffers::ET_SHORT:
        case flatbuffers::ET_USHORT:
        case flatbuffers::ET_INT:
        case flatbuffers::ET_UINT:
        case flatbuffers::ET_LONG:
        case flatbuffers::ET_ULONG:
        case flatbuffers::ET_FLOAT:
        case flatbuffers::ET_DOUBLE:
          elementary_type = "number";
          break;
        case flatbuffers::ET_BOOL:
          elementary_type = "boolean";
          break;
        case flatbuffers::ET_STRING:
          elementary_type = "string";
          break;
        case flatbuffers::ET_SEQUENCE:
          if (type.FieldIsEnum(index)) {
            elementary_type = "string";
          } else {
            LOG(FATAL) << "Should not encounter any sequence fields here.";
          }
          break;
      }
      if (is_array) {
        field["type"] = "array";
        field["items"]["type"] = elementary_type;
      } else {
        field["type"] = elementary_type;
      }
    }
    // the nlohmann::json [] operator needs an actual string, not just a
    // string_view :(.
    properties[std::string(type.FieldName(index))] = field;
  }
  schema["properties"] = properties;
  return schema;
}

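// Minimal usage sketch (not from this file; assumes the usual AOS config /
// ShmEventLoop setup and that the enum/function spellings below match
// mcap_logger.h):
//
//   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
//       aos::configuration::ReadConfig("aos_config.json");
//   aos::ShmEventLoop event_loop(&config.message());
//   aos::McapLogger logger(&event_loop, "/tmp/log.mcap",
//                          aos::McapLogger::Serialization::kFlatbuffer);
//   event_loop.Run();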
McapLogger::McapLogger(EventLoop *event_loop, const std::string &output_path,
                       Serialization serialization)
    : event_loop_(event_loop),
      output_(output_path),
      serialization_(serialization),
      configuration_channel_([]() {
        // Set up a fake Channel for providing the configuration in the MCAP
        // file. This is included for convenience so that consumers of the MCAP
        // file can actually dereference things like the channel indices in AOS
        // timing reports.
        flatbuffers::FlatBufferBuilder fbb;
        flatbuffers::Offset<flatbuffers::String> name_offset =
            fbb.CreateString("");
        flatbuffers::Offset<flatbuffers::String> type_offset =
            fbb.CreateString("aos.Configuration");
        flatbuffers::Offset<reflection::Schema> schema_offset =
            aos::CopyFlatBuffer(
                aos::FlatbufferSpan<reflection::Schema>(ConfigurationSchema()),
                &fbb);
        Channel::Builder channel(fbb);
        channel.add_name(name_offset);
        channel.add_type(type_offset);
        channel.add_schema(schema_offset);
        fbb.Finish(channel.Finish());
        return fbb.Release();
      }()),
      configuration_(CopyFlatBuffer(event_loop_->configuration())) {
  event_loop->SkipTimingReport();
  event_loop->SkipAosLog();
  CHECK(output_);
  WriteMagic();
  WriteHeader();
  // Schemas and channels get written out both at the start and end of the file,
  // per the MCAP spec.
  WriteSchemasAndChannels(RegisterHandlers::kYes);
}

McapLogger::~McapLogger() {
  // If we have any data remaining, write one last chunk.
  if (current_chunk_.tellp() > 0) {
    WriteChunk();
  }
  WriteDataEnd();

  // Now we enter the Summary section, where we write out all the channel/index
  // information that readers need to be able to seek to arbitrary locations
  // within the log.
  const uint64_t summary_offset = output_.tellp();
  const SummaryOffset chunk_indices_offset = WriteChunkIndices();
  const SummaryOffset stats_offset = WriteStatistics();
  // Schemas/Channels need to get reproduced in the summary section for random
  // access reading.
  const std::vector<SummaryOffset> offsets =
      WriteSchemasAndChannels(RegisterHandlers::kNo);

  // Next we have the summary offset section, which references the individual
  // pieces of the summary section.
  const uint64_t summary_offset_offset = output_.tellp();

  // SummaryOffsets must all be the final thing before the footer.
  WriteSummaryOffset(chunk_indices_offset);
  WriteSummaryOffset(stats_offset);
  for (const auto &offset : offsets) {
    WriteSummaryOffset(offset);
  }

  // And finally, the footer which must itself reference the start of the
  // summary and summary offset sections.
  WriteFooter(summary_offset, summary_offset_offset);
  WriteMagic();

  // TODO(james): Add compression. With flatbuffers messages that contain large
  // numbers of zeros (e.g., large grids or thresholded images) this can result
  // in massive savings.
  if (VLOG_IS_ON(2)) {
    // For debugging, print out how much space each channel is taking in the
    // overall log.
    LOG(INFO) << total_message_bytes_;
    std::vector<std::pair<size_t, const Channel *>> channel_bytes;
    for (const auto &pair : total_channel_bytes_) {
      channel_bytes.push_back(std::make_pair(pair.second, pair.first));
    }
    std::sort(channel_bytes.begin(), channel_bytes.end());
    for (const auto &pair : channel_bytes) {
      LOG(INFO) << configuration::StrippedChannelToString(pair.second) << ": "
                << static_cast<float>(pair.first) * 1e-6 << "MB "
                << static_cast<float>(pair.first) / total_message_bytes_
                << "\n";
    }
  }
}

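// Writes a Schema record and a Channel record for every channel that is
// readable on this node (plus the synthetic configuration channel below), and
// returns the offsets/sizes of those two groups of records for use in the
// summary section. On the first pass (RegisterHandlers::kYes) it also sets up
// the watchers/fetchers that feed WriteMessage().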
std::vector<McapLogger::SummaryOffset> McapLogger::WriteSchemasAndChannels(
    RegisterHandlers register_handlers) {
  uint16_t id = 0;
  std::map<uint16_t, const Channel *> channels;
  for (const Channel *channel : *event_loop_->configuration()->channels()) {
    ++id;
    if (!configuration::ChannelIsReadableOnNode(channel, event_loop_->node())) {
      continue;
    }
    channels[id] = channel;

    if (register_handlers == RegisterHandlers::kYes) {
      message_counts_[id] = 0;
      event_loop_->MakeRawWatcher(
          channel, [this, id, channel](const Context &context, const void *) {
            WriteMessage(id, channel, context, &current_chunk_);
            if (static_cast<uint64_t>(current_chunk_.tellp()) >
                FLAGS_mcap_chunk_size) {
              WriteChunk();
            }
          });
      fetchers_[id] = event_loop_->MakeRawFetcher(channel);
      event_loop_->OnRun([this, id, channel]() {
        if (FLAGS_fetch && fetchers_[id]->Fetch()) {
          WriteMessage(id, channel, fetchers_[id]->context(), &current_chunk_);
        }
      });
    }
  }

  // Manually add in a special /configuration channel.
  if (register_handlers == RegisterHandlers::kYes) {
    configuration_id_ = ++id;
    event_loop_->OnRun([this]() {
      Context config_context;
      config_context.monotonic_event_time = event_loop_->monotonic_now();
      config_context.queue_index = 0;
      config_context.size = configuration_.span().size();
      config_context.data = configuration_.span().data();
      WriteMessage(configuration_id_, &configuration_channel_.message(),
                   config_context, &current_chunk_);
    });
  }

  std::vector<SummaryOffset> offsets;

  const uint64_t schema_offset = output_.tellp();

  for (const auto &pair : channels) {
    WriteSchema(pair.first, pair.second);
  }

  WriteSchema(configuration_id_, &configuration_channel_.message());

  const uint64_t channel_offset = output_.tellp();

  offsets.push_back(
      {OpCode::kSchema, schema_offset, channel_offset - schema_offset});

  for (const auto &pair : channels) {
    // Write out the channel entry that uses the schema (we just re-use
    // the schema ID for the channel ID, since we aren't deduplicating
    // schemas for channels that are of the same type).
    WriteChannel(pair.first, pair.first, pair.second);
  }

  // Provide the configuration message on a special channel that is just named
  // "configuration", which is guaranteed not to conflict with any existing
  // channel under our current naming scheme (since that scheme will, at a
  // minimum, put a space between the name and type of a channel).
  WriteChannel(configuration_id_, configuration_id_,
               &configuration_channel_.message(), "configuration");

  offsets.push_back({OpCode::kChannel, channel_offset,
                     static_cast<uint64_t>(output_.tellp()) - channel_offset});
  return offsets;
}

void McapLogger::WriteMagic() { output_ << "\x89MCAP0\r\n"; }

void McapLogger::WriteHeader() {
  string_builder_.Reset();
  // "profile"
  AppendString(&string_builder_, "x-aos");
  // "library"
  AppendString(&string_builder_, "AOS MCAP converter");
  WriteRecord(OpCode::kHeader, string_builder_.Result());
}

void McapLogger::WriteFooter(uint64_t summary_offset,
                             uint64_t summary_offset_offset) {
  string_builder_.Reset();
  AppendInt64(&string_builder_, summary_offset);
  AppendInt64(&string_builder_, summary_offset_offset);
  // CRC32 for the Summary section, which we don't bother populating.
  AppendInt32(&string_builder_, 0);
  WriteRecord(OpCode::kFooter, string_builder_.Result());
}

void McapLogger::WriteDataEnd() {
  string_builder_.Reset();
  // CRC32 for the data, which we are too lazy to calculate.
  AppendInt32(&string_builder_, 0);
  WriteRecord(OpCode::kDataEnd, string_builder_.Result());
}

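// Writes an MCAP Schema record for the channel: a uint16 schema ID, the type
// name, the encoding ("jsonschema" or "flatbuffer"), and the schema body
// itself (a generated JSON schema or the raw reflection::Schema flatbuffer,
// depending on serialization_).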
void McapLogger::WriteSchema(const uint16_t id, const aos::Channel *channel) {
  CHECK(channel->has_schema());

  const FlatbufferDetachedBuffer<reflection::Schema> schema =
      CopyFlatBuffer(channel->schema());

  // Write out the schema (we don't bother deduplicating schema types):
  string_builder_.Reset();
  // Schema ID
  AppendInt16(&string_builder_, id);
  // Type name
  AppendString(&string_builder_, channel->type()->string_view());
  switch (serialization_) {
    case Serialization::kJson:
      // Encoding
      AppendString(&string_builder_, "jsonschema");
      // Actual schema itself
      AppendString(&string_builder_,
                   JsonSchemaForFlatbuffer({channel->schema()}).dump());
      break;
    case Serialization::kFlatbuffer:
      // Encoding
      AppendString(&string_builder_, "flatbuffer");
      // Actual schema itself
      AppendString(&string_builder_,
                   {reinterpret_cast<const char *>(schema.span().data()),
                    schema.span().size()});
      break;
  }
  WriteRecord(OpCode::kSchema, string_builder_.Result());
}

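// Writes an MCAP Channel record. The topic name defaults to
// "<channel name> <channel type>" so that AOS channels sharing a type stay
// distinct; override_name lets callers (e.g., the configuration channel) pick
// a fixed name instead.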
void McapLogger::WriteChannel(const uint16_t id, const uint16_t schema_id,
                              const aos::Channel *channel,
                              std::string_view override_name) {
  string_builder_.Reset();
  // Channel ID
  AppendInt16(&string_builder_, id);
  // Schema ID
  AppendInt16(&string_builder_, schema_id);
  // Topic name
  AppendString(&string_builder_,
               override_name.empty()
                   ? absl::StrCat(channel->name()->string_view(), " ",
                                  channel->type()->string_view())
                   : override_name);
  // Encoding
  switch (serialization_) {
    case Serialization::kJson:
      AppendString(&string_builder_, "json");
      break;
    case Serialization::kFlatbuffer:
      AppendString(&string_builder_, "flatbuffer");
      break;
  }

  // Metadata (technically supposed to be a Map<string, string>)
  AppendString(&string_builder_, "");
  WriteRecord(OpCode::kChannel, string_builder_.Result());
}

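// Serializes one message into *output (normally the current chunk buffer) as
// an MCAP Message record: channel ID, sequence number (we use the AOS queue
// index), log time, publish time, and the payload (JSON or raw flatbuffer).
// Also updates the bookkeeping (message counts, earliest/latest timestamps,
// per-chunk message indices) consumed later by the summary section.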
void McapLogger::WriteMessage(uint16_t channel_id, const Channel *channel,
                              const Context &context, std::ostream *output) {
  CHECK_NOTNULL(context.data);

  message_counts_[channel_id]++;

  if (!earliest_message_.has_value()) {
    earliest_message_ = context.monotonic_event_time;
  } else {
    earliest_message_ =
        std::min(context.monotonic_event_time, earliest_message_.value());
  }
  if (!earliest_chunk_message_.has_value()) {
    earliest_chunk_message_ = context.monotonic_event_time;
  } else {
    earliest_chunk_message_ =
        std::min(context.monotonic_event_time, earliest_chunk_message_.value());
  }
  latest_message_ = context.monotonic_event_time;

  string_builder_.Reset();
  // Channel ID
  AppendInt16(&string_builder_, channel_id);
  // Queue Index
  AppendInt32(&string_builder_, context.queue_index);
  // Log time and publish time. Since we don't track a separate logged time,
  // just use the published time for both.
  // TODO(james): If we use this for multi-node logfiles, use distributed clock.
  AppendInt64(&string_builder_,
              context.monotonic_event_time.time_since_epoch().count());
  // Note: Foxglove Studio doesn't appear to actually support using publish time
  // right now.
  AppendInt64(&string_builder_,
              context.monotonic_event_time.time_since_epoch().count());

  CHECK(flatbuffers::Verify(*channel->schema(),
                            *channel->schema()->root_table(),
                            static_cast<const uint8_t *>(context.data),
                            static_cast<size_t>(context.size)))
      << ": Corrupted flatbuffer on " << channel->name()->c_str() << " "
      << channel->type()->c_str();

  switch (serialization_) {
    case Serialization::kJson:
      aos::FlatbufferToJson(&string_builder_, channel->schema(),
                            static_cast<const uint8_t *>(context.data));
      break;
    case Serialization::kFlatbuffer:
      string_builder_.Append(
          {static_cast<const char *>(context.data), context.size});
      break;
  }
  total_message_bytes_ += context.size;
  total_channel_bytes_[channel] += context.size;

  message_indices_[channel_id].push_back(std::make_pair<uint64_t, uint64_t>(
      context.monotonic_event_time.time_since_epoch().count(),
      output->tellp()));

  WriteRecord(OpCode::kMessage, string_builder_.Result(), output);
}

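// Every MCAP record shares the framing written here: a one-byte opcode, an
// 8-byte record length, and then the record payload assembled by the caller
// via the Append* helpers.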
void McapLogger::WriteRecord(OpCode op, std::string_view record,
                             std::ostream *ostream) {
  ostream->put(static_cast<char>(op));
  uint64_t record_length = record.size();
  ostream->write(reinterpret_cast<const char *>(&record_length),
                 sizeof(record_length));
  *ostream << record;
}

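// Flushes the records buffered in current_chunk_ into a Chunk record
// (start/end message times, uncompressed size, CRC placeholder, empty
// compression string, then the records themselves), followed by one
// MessageIndex record per channel present in the chunk. The offsets involved
// are remembered in chunk_indices_ so that matching ChunkIndex records can be
// written to the summary section later.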
void McapLogger::WriteChunk() {
  string_builder_.Reset();

  CHECK(earliest_chunk_message_.has_value());
  const uint64_t chunk_offset = output_.tellp();
  AppendInt64(&string_builder_,
              earliest_chunk_message_->time_since_epoch().count());
  AppendInt64(&string_builder_, latest_message_.time_since_epoch().count());

  std::string chunk_records = current_chunk_.str();
  // Reset the chunk buffer.
  current_chunk_.str("");

  const uint64_t records_size = chunk_records.size();
  // Uncompressed chunk size.
  AppendInt64(&string_builder_, records_size);
  // Uncompressed CRC (unpopulated).
  AppendInt32(&string_builder_, 0);
  AppendString(&string_builder_, "");
  AppendBytes(&string_builder_, chunk_records);
  WriteRecord(OpCode::kChunk, string_builder_.Result());

  std::map<uint16_t, uint64_t> index_offsets;
  const uint64_t message_index_start = output_.tellp();
  for (const auto &indices : message_indices_) {
    index_offsets[indices.first] = output_.tellp();
    string_builder_.Reset();
    AppendInt16(&string_builder_, indices.first);
    AppendMessageIndices(&string_builder_, indices.second);
    WriteRecord(OpCode::kMessageIndex, string_builder_.Result());
  }
  message_indices_.clear();
  chunk_indices_.push_back(ChunkIndex{
      earliest_chunk_message_.value(), latest_message_, chunk_offset,
      message_index_start - chunk_offset, records_size, index_offsets,
      static_cast<uint64_t>(output_.tellp()) - message_index_start});
  earliest_chunk_message_.reset();
}

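// Writes the Statistics summary record: total message count, the
// schema/channel/attachment/metadata/chunk counts, the overall message time
// range, and the per-channel message counts accumulated in message_counts_.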
McapLogger::SummaryOffset McapLogger::WriteStatistics() {
  const uint64_t stats_offset = output_.tellp();
  const uint64_t message_count = std::accumulate(
      message_counts_.begin(), message_counts_.end(), uint64_t{0},
      [](const uint64_t &count, const std::pair<uint16_t, uint64_t> &val) {
        return count + val.second;
      });
  string_builder_.Reset();
  AppendInt64(&string_builder_, message_count);
  // Schema count.
  AppendInt16(&string_builder_, message_counts_.size());
  // Channel count.
  AppendInt32(&string_builder_, message_counts_.size());
  // Attachment count.
  AppendInt32(&string_builder_, 0);
  // Metadata count.
  AppendInt32(&string_builder_, 0);
  // Chunk count.
  AppendInt32(&string_builder_, chunk_indices_.size());
  // Earliest & latest message times.
  AppendInt64(&string_builder_, earliest_message_->time_since_epoch().count());
  AppendInt64(&string_builder_, latest_message_.time_since_epoch().count());
  // Per-channel message counts.
  AppendChannelMap(&string_builder_, message_counts_);
  WriteRecord(OpCode::kStatistics, string_builder_.Result());
  return {OpCode::kStatistics, stats_offset,
          static_cast<uint64_t>(output_.tellp()) - stats_offset};
}

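// Writes one ChunkIndex record per chunk so readers can seek directly to the
// chunk covering a given time range. Since we don't compress, the compressed
// and uncompressed record sizes are identical.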
McapLogger::SummaryOffset McapLogger::WriteChunkIndices() {
  const uint64_t index_offset = output_.tellp();
  for (const ChunkIndex &index : chunk_indices_) {
    string_builder_.Reset();
    AppendInt64(&string_builder_, index.start_time.time_since_epoch().count());
    AppendInt64(&string_builder_, index.end_time.time_since_epoch().count());
    AppendInt64(&string_builder_, index.offset);
    AppendInt64(&string_builder_, index.chunk_size);
    AppendChannelMap(&string_builder_, index.message_index_offsets);
    AppendInt64(&string_builder_, index.message_index_size);
    // Compression used.
    AppendString(&string_builder_, "");
    // Compressed and uncompressed records size.
    AppendInt64(&string_builder_, index.records_size);
    AppendInt64(&string_builder_, index.records_size);
    WriteRecord(OpCode::kChunkIndex, string_builder_.Result());
  }
  return {OpCode::kChunkIndex, index_offset,
          static_cast<uint64_t>(output_.tellp()) - index_offset};
}

void McapLogger::WriteSummaryOffset(const SummaryOffset &offset) {
  string_builder_.Reset();
  string_builder_.AppendChar(static_cast<char>(offset.op_code));
  AppendInt64(&string_builder_, offset.offset);
  AppendInt64(&string_builder_, offset.size);
  WriteRecord(OpCode::kSummaryOffset, string_builder_.Result());
}

void McapLogger::AppendString(FastStringBuilder *builder,
                              std::string_view string) {
  AppendInt32(builder, string.size());
  builder->Append(string);
}

void McapLogger::AppendBytes(FastStringBuilder *builder,
                             std::string_view bytes) {
  AppendInt64(builder, bytes.size());
  builder->Append(bytes);
}

namespace {
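// These helpers serialize integers (and the maps below) by copying their raw
// bytes, i.e. in host byte order. MCAP specifies little-endian encoding, so
// this assumes a little-endian host; a big-endian port would need explicit
// byte swapping here.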
template <typename T>
static void AppendInt(FastStringBuilder *builder, T val) {
  builder->Append(
      std::string_view(reinterpret_cast<const char *>(&val), sizeof(T)));
}
template <typename T>
void AppendMap(FastStringBuilder *builder, const T &map) {
  AppendInt<uint32_t>(
      builder, map.size() * (sizeof(typename T::value_type::first_type) +
                             sizeof(typename T::value_type::second_type)));
  for (const auto &pair : map) {
    AppendInt(builder, pair.first);
    AppendInt(builder, pair.second);
  }
}
}  // namespace

void McapLogger::AppendChannelMap(FastStringBuilder *builder,
                                  const std::map<uint16_t, uint64_t> &map) {
  AppendMap(builder, map);
}

void McapLogger::AppendMessageIndices(
    FastStringBuilder *builder,
    const std::vector<std::pair<uint64_t, uint64_t>> &messages) {
  AppendMap(builder, messages);
}

void McapLogger::AppendInt16(FastStringBuilder *builder, uint16_t val) {
  AppendInt(builder, val);
}

void McapLogger::AppendInt32(FastStringBuilder *builder, uint32_t val) {
  AppendInt(builder, val);
}

void McapLogger::AppendInt64(FastStringBuilder *builder, uint64_t val) {
  AppendInt(builder, val);
}
}  // namespace aos