Use config found in SortLogs instead of random log files
This both gives us a single Configuration object to use for all log
parts with matching configurations, and also gives us an abstraction to
use when we move the config out of the top of each log file and into a
separate file.
While we are here, expose name from the log file header as well. This
removes the last user of the raw LogFileHeader from logger.
Change-Id: I0c99d64f9a7222e17100650cdf4b018ae224887a
diff --git a/WORKSPACE b/WORKSPACE
index 42a5945..aabd057 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -1,6 +1,6 @@
workspace(name = "org_frc971")
-load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository", "new_git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load(
"//debian:python.bzl",
@@ -167,6 +167,15 @@
path = "third_party/eigen",
)
+http_archive(
+ name = "boringssl",
+ patch_args = ["-p1"],
+ patches = ["//debian:boringssl.patch"],
+ sha256 = "bcab08a22c28f5322316542aa2c3a9ef0a9f9fde9be22d489cee574867b24675",
+ strip_prefix = "boringssl-613fe9dbe74b58d6aaaf0d22fe57dccd964c7413",
+ urls = ["https://www.frc971.org/Build-Dependencies/boringssl-613fe9dbe74b58d6aaaf0d22fe57dccd964c7413.zip"],
+)
+
# C++ rules for Bazel.
http_archive(
name = "rules_cc",
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 7fa6ec2..5d21ae4 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -38,6 +38,7 @@
"@com_github_google_flatbuffers//:flatbuffers",
"@com_github_google_glog//:glog",
"@com_google_absl//absl/types:span",
+ "@boringssl//:crypto",
] + select({
"//tools:cpu_k8": [":lzma_encoder"],
"//tools:cpu_aarch64": [":lzma_encoder"],
diff --git a/aos/events/logging/logfile_sorting.cc b/aos/events/logging/logfile_sorting.cc
index 22fc513..f03bb3d 100644
--- a/aos/events/logging/logfile_sorting.cc
+++ b/aos/events/logging/logfile_sorting.cc
@@ -9,15 +9,32 @@
#include "sys/stat.h"
#include "aos/events/logging/logfile_utils.h"
+#include "aos/flatbuffer_merge.h"
#include "aos/flatbuffers.h"
#include "aos/time/time.h"
+#include <openssl/sha.h>
+
namespace aos {
namespace logger {
namespace chrono = std::chrono;
namespace {
+std::string Sha256(const absl::Span<const uint8_t> str) {
+ unsigned char hash[SHA256_DIGEST_LENGTH];
+ SHA256_CTX sha256;
+ SHA256_Init(&sha256);
+ SHA256_Update(&sha256, str.data(), str.size());
+ SHA256_Final(hash, &sha256);
+ std::stringstream ss;
+ for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) {
+ ss << std::hex << std::setw(2) << std::setfill('0')
+ << static_cast<int>(hash[i]);
+ }
+ return ss.str();
+}
+
// Check if string ends with ending
bool EndsWith(std::string_view str, std::string_view ending) {
return str.size() >= ending.size() &&
@@ -107,6 +124,9 @@
aos::realtime_clock::time_point realtime_start_time =
aos::realtime_clock::min_time;
+ // Name from a log. All logs below have been confirmed to match.
+ std::string name;
+
std::map<std::string, UnsortedLogParts> unsorted_parts;
};
@@ -124,6 +144,9 @@
// extracting.
std::vector<std::pair<monotonic_clock::time_point, std::string>>
unsorted_parts;
+
+ // Name from a log. All logs below have been confirmed to match.
+ std::string name;
};
// A list of all the old parts which we don't know how to sort using uuids.
@@ -156,6 +179,11 @@
? log_header->message().node()->name()->string_view()
: "";
+ const std::string_view name =
+ log_header->message().has_name()
+ ? log_header->message().name()->string_view()
+ : "";
+
const std::string_view logger_node =
log_header->message().has_logger_node()
? log_header->message().logger_node()->name()->string_view()
@@ -201,9 +229,11 @@
old_parts.back().parts.realtime_start_time = realtime_start_time;
old_parts.back().unsorted_parts.emplace_back(
std::make_pair(first_message_time, part));
+ old_parts.back().name = name;
} else {
result->unsorted_parts.emplace_back(
std::make_pair(first_message_time, part));
+ CHECK_EQ(result->name, name);
}
continue;
}
@@ -212,6 +242,8 @@
CHECK(log_header->message().has_parts_uuid());
CHECK(log_header->message().has_parts_index());
+ CHECK(log_header->message().has_configuration());
+
CHECK_EQ(log_header->message().has_logger_node(),
log_header->message().has_node());
@@ -228,9 +260,11 @@
.first;
log_it->second.logger_node = logger_node;
log_it->second.logger_boot_uuid = logger_boot_uuid;
+ log_it->second.name = name;
} else {
CHECK_EQ(log_it->second.logger_node, logger_node);
CHECK_EQ(log_it->second.logger_boot_uuid, logger_boot_uuid);
+ CHECK_EQ(log_it->second.name, name);
}
if (node == log_it->second.logger_node) {
@@ -293,6 +327,8 @@
<< ": Can't have a mix of old and new parts.";
// Now reformat old_parts to be in the right datastructure to report.
+ std::map<std::string, std::shared_ptr<const Configuration>>
+ copied_config_sha256;
if (!old_parts.empty()) {
std::vector<LogFile> result;
for (UnsortedOldParts &p : old_parts) {
@@ -304,14 +340,46 @@
return a.first < b.first;
});
LogFile log_file;
+
+ // We want to use a single Configuration flatbuffer for all the parts to
+ // make downstream processing easier. Since this is an old log, it doesn't have a
+ // SHA256 in the header to rely on, so we need a way to detect duplicates.
+ //
+ // SHA256 is decently fast, so use that as a representative hash of the
+ // header.
+ auto header =
+ std::make_shared<SizePrefixedFlatbufferVector<LogFileHeader>>(
+ std::move(*ReadHeader(p.unsorted_parts[0].second)));
+
+ // Do a recursive copy to normalize the flatbuffer. Different
+ // configurations can be built different ways, and can even have their
+ // vtable out of order. Don't think and just trigger a copy.
+ FlatbufferDetachedBuffer<Configuration> config_copy =
+ RecursiveCopyFlatBuffer(header->message().configuration());
+
+ std::string config_copy_sha256 = Sha256(config_copy.span());
+
+ auto it = copied_config_sha256.find(config_copy_sha256);
+ if (it != copied_config_sha256.end()) {
+ log_file.config = it->second;
+ } else {
+ std::shared_ptr<const Configuration> config(
+ header, header->message().configuration());
+
+ copied_config_sha256.emplace(config_copy_sha256, config);
+ log_file.config = config;
+ }
+
for (std::pair<monotonic_clock::time_point, std::string> &f :
p.unsorted_parts) {
p.parts.parts.emplace_back(std::move(f.second));
}
+ p.parts.config = log_file.config;
log_file.parts.emplace_back(std::move(p.parts));
log_file.monotonic_start_time = log_file.parts[0].monotonic_start_time;
log_file.realtime_start_time = log_file.parts[0].realtime_start_time;
log_file.corrupted = corrupted;
+ log_file.name = p.name;
result.emplace_back(std::move(log_file));
}
@@ -328,7 +396,9 @@
new_file.logger_boot_uuid = logs.second.logger_boot_uuid;
new_file.monotonic_start_time = logs.second.monotonic_start_time;
new_file.realtime_start_time = logs.second.realtime_start_time;
+ new_file.name = logs.second.name;
new_file.corrupted = corrupted;
+ bool seen_part = false;
for (std::pair<const std::string, UnsortedLogParts> &parts :
logs.second.unsorted_parts) {
LogParts new_parts;
@@ -352,6 +422,31 @@
for (std::pair<std::string, int> &p : parts.second.parts) {
new_parts.parts.emplace_back(std::move(p.first));
}
+
+ if (!seen_part) {
+ auto header =
+ std::make_shared<SizePrefixedFlatbufferVector<LogFileHeader>>(
+ std::move(*ReadHeader(new_parts.parts[0])));
+
+ std::shared_ptr<const Configuration> config(
+ header, header->message().configuration());
+
+ FlatbufferDetachedBuffer<Configuration> config_copy =
+ RecursiveCopyFlatBuffer(header->message().configuration());
+
+ std::string config_copy_sha256 = Sha256(config_copy.span());
+
+ auto it = copied_config_sha256.find(config_copy_sha256);
+ if (it != copied_config_sha256.end()) {
+ new_file.config = it->second;
+ } else {
+ copied_config_sha256.emplace(config_copy_sha256, config);
+ new_file.config = config;
+ }
+ }
+ new_parts.config = new_file.config;
+ seen_part = true;
+
new_file.parts.emplace_back(std::move(new_parts));
}
result.emplace_back(std::move(new_file));
@@ -397,7 +492,8 @@
if (!file.logger_boot_uuid.empty()) {
stream << " \"logger_boot_uuid\": \"" << file.logger_boot_uuid << "\",\n";
}
- stream << " \"monotonic_start_time\": " << file.monotonic_start_time
+ stream << " \"config\": " << file.config.get();
+ stream << ",\n \"monotonic_start_time\": " << file.monotonic_start_time
<< ",\n \"realtime_start_time\": " << file.realtime_start_time
<< ",\n";
stream << " \"parts\": [\n";
@@ -424,7 +520,8 @@
if (!parts.source_boot_uuid.empty()) {
stream << " \"source_boot_uuid\": \"" << parts.source_boot_uuid << "\",\n";
}
- stream << " \"monotonic_start_time\": " << parts.monotonic_start_time
+ stream << " \"config\": " << parts.config.get();
+ stream << ",\n \"monotonic_start_time\": " << parts.monotonic_start_time
<< ",\n \"realtime_start_time\": " << parts.realtime_start_time
<< ",\n \"parts\": [";
diff --git a/aos/events/logging/logfile_sorting.h b/aos/events/logging/logfile_sorting.h
index bfe1aa4..c3d1dea 100644
--- a/aos/events/logging/logfile_sorting.h
+++ b/aos/events/logging/logfile_sorting.h
@@ -5,6 +5,7 @@
#include <string>
#include <vector>
+#include "aos/configuration.h"
#include "aos/events/logging/uuid.h"
#include "aos/time/time.h"
@@ -39,6 +40,10 @@
// Pre-sorted list of parts.
std::vector<std::string> parts;
+
+ // Configuration for all the log parts. This will be a single object for all
+ // log files with the same config.
+ std::shared_ptr<const aos::Configuration> config;
};
// Datastructure to hold parts from the same run of the logger which have no
@@ -56,11 +61,18 @@
aos::monotonic_clock::time_point monotonic_start_time;
aos::realtime_clock::time_point realtime_start_time;
+ // The name field in the log file headers.
+ std::string name;
+
// All the parts, unsorted.
std::vector<LogParts> parts;
// A list of parts which were corrupted and are unknown where they should go.
std::vector<std::string> corrupted;
+
+ // Configuration for all the log parts and files. This is a single
+ // object for log files with the same config.
+ std::shared_ptr<const aos::Configuration> config;
};
std::ostream &operator<<(std::ostream &stream, const LogFile &file);
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 969f221..f6772e8 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -623,8 +623,7 @@
parts_sorters_.emplace_back(std::move(part));
}
- node_ = configuration::GetNodeIndex(log_file_header()->configuration(),
- part0_node);
+ node_ = configuration::GetNodeIndex(configuration(), part0_node);
monotonic_start_time_ = monotonic_clock::max_time;
realtime_start_time_ = realtime_clock::max_time;
@@ -636,6 +635,15 @@
}
}
+std::vector<const LogParts *> NodeMerger::Parts() const {
+ std::vector<const LogParts *> p;
+ p.reserve(parts_sorters_.size());
+ for (const LogPartsSorter &parts_sorter : parts_sorters_) {
+ p.emplace_back(&parts_sorter.parts());
+ }
+ return p;
+}
+
Message *NodeMerger::Front() {
// Return the current Front if we have one, otherwise go compute one.
if (current_ != nullptr) {
@@ -695,7 +703,14 @@
.monotonic_remote_time = monotonic_clock::min_time,
.realtime_remote_time = realtime_clock::min_time,
.data = SizePrefixedFlatbufferVector<MessageHeader>::Empty()} {
- const Configuration *config = log_file_header()->configuration();
+ for (const LogParts *part : node_merger_.Parts()) {
+ if (!configuration_) {
+ configuration_ = part->config;
+ } else {
+ CHECK_EQ(configuration_.get(), part->config.get());
+ }
+ }
+ const Configuration *config = configuration_.get();
// Only fill out nodes_data_ if there are nodes. Otherwise everything gets
// pretty simple.
if (configuration::MultiNode(config)) {
@@ -728,7 +743,7 @@
}
void TimestampMapper::AddPeer(TimestampMapper *timestamp_mapper) {
- CHECK(configuration::MultiNode(log_file_header()->configuration()));
+ CHECK(configuration::MultiNode(configuration()));
CHECK_NE(timestamp_mapper->node(), node());
CHECK_LT(timestamp_mapper->node(), nodes_data_.size());
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index 61f16aa..088ce73 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -368,20 +368,14 @@
public:
LogPartsSorter(LogParts log_parts);
- // Returns the current log file header.
- // TODO(austin): Is this the header we want to report? Do we want a better
- // start time?
- // TODO(austin): Report a start time from the LogParts. Figure out how that
- // all works.
- const LogFileHeader *log_file_header() const {
- return parts_message_reader_.log_file_header();
- }
+ // Returns the parts that this is sorting messages from.
+ const LogParts &parts() const { return parts_message_reader_.parts(); }
monotonic_clock::time_point monotonic_start_time() const {
- return parts_message_reader_.parts().monotonic_start_time;
+ return parts().monotonic_start_time;
}
realtime_clock::time_point realtime_start_time() const {
- return parts_message_reader_.parts().realtime_start_time;
+ return parts().realtime_start_time;
}
// The time this data is sorted until.
@@ -421,10 +415,11 @@
// Node index in the configuration of this node.
int node() const { return node_; }
- // The log file header for one of the log files.
- const LogFileHeader *log_file_header() const {
- CHECK(!parts_sorters_.empty());
- return parts_sorters_[0].log_file_header();
+ // List of parts being sorted together.
+ std::vector<const LogParts *> Parts() const;
+
+ const Configuration *configuration() const {
+ return parts_sorters_[0].parts().config.get();
}
monotonic_clock::time_point monotonic_start_time() const {
@@ -481,10 +476,9 @@
// timestamps out of this queue. This lets us bootstrap time estimation
// without exploding memory usage worst case.
- // Returns a log file header for this node.
- const LogFileHeader *log_file_header() const {
- return node_merger_.log_file_header();
- }
+ std::vector<const LogParts *> Parts() const { return node_merger_.Parts(); }
+
+ const Configuration *configuration() const { return configuration_.get(); }
// Returns which node this is sorting for.
size_t node() const { return node_merger_.node(); }
@@ -559,6 +553,9 @@
// The node merger to source messages from.
NodeMerger node_merger_;
+
+ std::shared_ptr<const Configuration> configuration_;
+
// The buffer of messages for this node. These are not matched with any
// remote data.
std::deque<Message> messages_;
diff --git a/aos/events/logging/logfile_utils_test.cc b/aos/events/logging/logfile_utils_test.cc
index 9029ce1..7795e66 100644
--- a/aos/events/logging/logfile_utils_test.cc
+++ b/aos/events/logging/logfile_utils_test.cc
@@ -99,6 +99,7 @@
JsonToSizedFlatbuffer<LogFileHeader>(
R"({
"max_out_of_order_duration": 100000000,
+ "configuration": {},
"log_event_uuid": "30ef1283-81d7-4004-8c36-1c162dbcb2b2",
"parts_uuid": "2a05d725-5d5c-4c0b-af42-88de2f3c3876",
"parts_index": 0
@@ -143,6 +144,7 @@
JsonToSizedFlatbuffer<LogFileHeader>(
R"({
"max_out_of_order_duration": 100000000,
+ "configuration": {},
"log_event_uuid": "30ef1283-81d7-4004-8c36-1c162dbcb2b2",
"parts_uuid": "2a05d725-5d5c-4c0b-af42-88de2f3c3876",
"parts_index": 0
@@ -153,6 +155,7 @@
"max_out_of_order_duration": 200000000,
"monotonic_start_time": 0,
"realtime_start_time": 0,
+ "configuration": {},
"log_event_uuid": "30ef1283-81d7-4004-8c36-1c162dbcb2b2",
"parts_uuid": "2a05d725-5d5c-4c0b-af42-88de2f3c3876",
"parts_index": 1
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index 24c0427..d750caf 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -40,18 +40,6 @@
namespace aos {
namespace logger {
namespace {
-// Helper to safely read a header, or CHECK.
-SizePrefixedFlatbufferVector<LogFileHeader> MaybeReadHeaderOrDie(
- const std::vector<LogFile> &log_files) {
- CHECK_GE(log_files.size(), 1u) << ": Empty filenames list";
- CHECK_GE(log_files[0].parts.size(), 1u) << ": Empty filenames list";
- CHECK_GE(log_files[0].parts[0].parts.size(), 1u) << ": Empty filenames list";
- std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> result =
- ReadHeader(log_files[0].parts[0].parts[0]);
- CHECK(result);
- return result.value();
-}
-
std::string LogFileVectorToString(std::vector<LogFile> log_files) {
std::stringstream ss;
for (const auto f : log_files) {
@@ -929,8 +917,20 @@
LogReader::LogReader(std::vector<LogFile> log_files,
const Configuration *replay_configuration)
: log_files_(std::move(log_files)),
- log_file_header_(MaybeReadHeaderOrDie(log_files_)),
replay_configuration_(replay_configuration) {
+ CHECK_GT(log_files_.size(), 0u);
+ {
+ // Validate that we have the same config everywhere. This will be true if
+ // all the parts were sorted together and the configs match.
+ const Configuration *config = nullptr;
+ for (const LogFile &log_file : log_files) {
+ if (config == nullptr) {
+ config = log_file.config.get();
+ } else {
+ CHECK_EQ(config, log_file.config.get());
+ }
+ }
+ }
MakeRemappedConfig();
// Remap all existing remote timestamp channels. They will be recreated, and
@@ -1000,11 +1000,10 @@
if (remapped_configuration_buffer_) {
remapped_configuration_buffer_->Wipe();
}
- log_file_header_.Wipe();
}
const Configuration *LogReader::logged_configuration() const {
- return log_file_header_.message().configuration();
+ return log_files_[0].config.get();
}
const Configuration *LogReader::configuration() const {
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index 3bf3fa8..56b99e8 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -448,7 +448,7 @@
template <typename T>
bool HasChannel(std::string_view name, const Node *node = nullptr) {
- return configuration::GetChannel(log_file_header()->configuration(), name,
+ return configuration::GetChannel(logged_configuration(), name,
T::GetFullyQualifiedName(), "", node,
true) != nullptr;
}
@@ -457,13 +457,7 @@
return event_loop_factory_;
}
- const LogFileHeader *log_file_header() const {
- return &log_file_header_.message();
- }
-
- std::string_view name() const {
- return log_file_header()->name()->string_view();
- }
+ std::string_view name() const { return log_files_[0].name; }
// Set whether to exit the SimulatedEventLoopFactory when we finish reading
// the logfile.
@@ -490,10 +484,6 @@
const std::vector<LogFile> log_files_;
- // This is *a* log file header used to provide the logged config. The rest of
- // the header is likely distracting.
- SizePrefixedFlatbufferVector<LogFileHeader> log_file_header_;
-
// State per node.
class State {
public:
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 315a272..7313b8f 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -473,6 +473,8 @@
logger->logger = std::make_unique<Logger>(logger->event_loop.get());
logger->logger->set_polling_period(std::chrono::milliseconds(100));
+ logger->logger->set_name(absl::StrCat(
+ "name_prefix_", logger->event_loop->node()->name()->str()));
logger->event_loop->OnRun([logger, logfile_base, compress]() {
std::unique_ptr<MultiNodeLogNamer> namer =
std::make_unique<MultiNodeLogNamer>(
@@ -510,6 +512,9 @@
log_event_uuids.insert(log_file.log_event_uuid);
logger_nodes.emplace_back(log_file.logger_node);
both_uuids.insert(log_file.log_event_uuid);
+ EXPECT_TRUE(log_file.config);
+ EXPECT_EQ(log_file.name,
+ absl::StrCat("name_prefix_", log_file.logger_node));
for (const LogParts &part : log_file.parts) {
EXPECT_NE(part.monotonic_start_time, aos::monotonic_clock::min_time)
@@ -520,6 +525,7 @@
EXPECT_TRUE(log_event_uuids.find(part.log_event_uuid) !=
log_event_uuids.end());
EXPECT_NE(part.node, "");
+ EXPECT_TRUE(log_file.config);
parts_uuids.insert(part.parts_uuid);
both_uuids.insert(part.parts_uuid);
}
diff --git a/debian/boringssl.patch b/debian/boringssl.patch
new file mode 100644
index 0000000..ceae8bc
--- /dev/null
+++ b/debian/boringssl.patch
@@ -0,0 +1,78 @@
+diff --git a/BUILD b/BUILD
+index cba9ccb1d..7890c88e6 100644
+--- a/BUILD
++++ b/BUILD
+@@ -25,6 +25,8 @@ load(
+ "crypto_sources",
+ "crypto_sources_linux_x86_64",
+ "crypto_sources_linux_ppc64le",
++ "crypto_sources_linux_aarch64",
++ "crypto_sources_linux_arm",
+ "crypto_sources_mac_x86_64",
+ "fips_fragments",
+ "ssl_headers",
+@@ -36,7 +38,17 @@ load(
+
+ config_setting(
+ name = "linux_x86_64",
+- values = {"cpu": "k8"},
++ constraint_values = ["@platforms//cpu:x86_64"],
++)
++
++config_setting(
++ name = "linux_aarch64",
++ constraint_values = ["@platforms//cpu:aarch64"],
++)
++
++config_setting(
++ name = "linux_arm",
++ constraint_values = ["@platforms//cpu:armv7"],
+ )
+
+ config_setting(
+@@ -76,6 +88,9 @@ posix_copts = [
+ "-Wwrite-strings",
+ "-Wshadow",
+ "-fno-common",
++ "-Wno-cast-qual",
++ "-Wno-cast-align",
++ "-Wno-unused-parameter",
+
+ # Modern build environments should be able to set this to use atomic
+ # operations for reference counting rather than locks. However, it's
+@@ -86,6 +101,8 @@ posix_copts = [
+ boringssl_copts = select({
+ ":linux_x86_64": posix_copts,
+ ":linux_ppc64le": posix_copts,
++ ":linux_arm": posix_copts,
++ ":linux_aarch64": posix_copts,
+ ":mac_x86_64": posix_copts,
+ ":windows_x86_64": [
+ "-DWIN32_LEAN_AND_MEAN",
+@@ -97,6 +114,8 @@ boringssl_copts = select({
+ crypto_sources_asm = select({
+ ":linux_x86_64": crypto_sources_linux_x86_64,
+ ":linux_ppc64le": crypto_sources_linux_ppc64le,
++ ":linux_arm": crypto_sources_linux_arm,
++ ":linux_aarch64": crypto_sources_linux_aarch64,
+ ":mac_x86_64": crypto_sources_mac_x86_64,
+ "//conditions:default": [],
+ })
+@@ -112,6 +131,8 @@ posix_copts_c11 = [
+ boringssl_copts_c11 = boringssl_copts + select({
+ ":linux_x86_64": posix_copts_c11,
+ ":linux_ppc64le": posix_copts_c11,
++ ":linux_arm": posix_copts_c11,
++ ":linux_aarch64": posix_copts_c11,
+ ":mac_x86_64": posix_copts_c11,
+ "//conditions:default": [],
+ })
+@@ -125,6 +146,8 @@ posix_copts_cxx = [
+ boringssl_copts_cxx = boringssl_copts + select({
+ ":linux_x86_64": posix_copts_cxx,
+ ":linux_ppc64le": posix_copts_cxx,
++ ":linux_arm": posix_copts_cxx,
++ ":linux_aarch64": posix_copts_cxx,
+ ":mac_x86_64": posix_copts_cxx,
+ "//conditions:default": [],
+ })