Squashed 'third_party/abseil/' content from commit ddf8e52a2
Change-Id: I330cdc687395c6605ad027b855e313ff4a2059e9
git-subtree-dir: third_party/abseil
git-subtree-split: ddf8e52a2918dd0ccec75d3e2426125fa3926724
diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel
new file mode 100644
index 0000000..b87c55a
--- /dev/null
+++ b/absl/debugging/BUILD.bazel
@@ -0,0 +1,314 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+load(
+ "//absl:copts/configure_copts.bzl",
+ "ABSL_DEFAULT_COPTS",
+ "ABSL_DEFAULT_LINKOPTS",
+ "ABSL_TEST_COPTS",
+)
+
+package(
+ default_visibility = ["//visibility:public"],
+)
+
+licenses(["notice"]) # Apache 2.0
+
+cc_library(
+ name = "stacktrace",
+ srcs = [
+ "stacktrace.cc",
+ ],
+ hdrs = ["stacktrace.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":debugging_internal",
+ "//absl/base:core_headers",
+ ],
+)
+
+cc_library(
+ name = "symbolize",
+ srcs = [
+ "symbolize.cc",
+ "symbolize_elf.inc",
+ "symbolize_unimplemented.inc",
+ "symbolize_win32.inc",
+ ],
+ hdrs = [
+ "internal/symbolize.h",
+ "symbolize.h",
+ ],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":debugging_internal",
+ ":demangle_internal",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
+ "//absl/base:malloc_internal",
+ "//absl/base:raw_logging_internal",
+ ],
+)
+
+cc_test(
+ name = "symbolize_test",
+ srcs = ["symbolize_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":stack_consumption",
+ ":symbolize",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "//absl/memory",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "examine_stack",
+ srcs = [
+ "internal/examine_stack.cc",
+ ],
+ hdrs = [
+ "internal/examine_stack.h",
+ ],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":stacktrace",
+ ":symbolize",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ ],
+)
+
+cc_library(
+ name = "failure_signal_handler",
+ srcs = ["failure_signal_handler.cc"],
+ hdrs = ["failure_signal_handler.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":examine_stack",
+ ":stacktrace",
+ "//absl/base",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ ],
+)
+
+cc_test(
+ name = "failure_signal_handler_test",
+ srcs = ["failure_signal_handler_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = select({
+ "//absl:windows": [],
+ "//conditions:default": ["-pthread"],
+ }) + ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":failure_signal_handler",
+ ":stacktrace",
+ ":symbolize",
+ "//absl/base:raw_logging_internal",
+ "//absl/strings",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "debugging_internal",
+ srcs = [
+ "internal/address_is_readable.cc",
+ "internal/elf_mem_image.cc",
+ "internal/vdso_support.cc",
+ ],
+ hdrs = [
+ "internal/address_is_readable.h",
+ "internal/elf_mem_image.h",
+ "internal/stacktrace_aarch64-inl.inc",
+ "internal/stacktrace_arm-inl.inc",
+ "internal/stacktrace_config.h",
+ "internal/stacktrace_generic-inl.inc",
+ "internal/stacktrace_powerpc-inl.inc",
+ "internal/stacktrace_unimplemented-inl.inc",
+ "internal/stacktrace_win32-inl.inc",
+ "internal/stacktrace_x86-inl.inc",
+ "internal/vdso_support.h",
+ ],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
+ "//absl/base:raw_logging_internal",
+ ],
+)
+
+cc_library(
+ name = "demangle_internal",
+ srcs = ["internal/demangle.cc"],
+ hdrs = ["internal/demangle.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ deps = [
+ "//absl/base",
+ "//absl/base:core_headers",
+ ],
+)
+
+cc_test(
+ name = "demangle_test",
+ srcs = ["internal/demangle_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":demangle_internal",
+ ":stack_consumption",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "//absl/memory",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "leak_check",
+ srcs = ["leak_check.cc"],
+ hdrs = ["leak_check.h"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = ["//absl/base:core_headers"],
+)
+
+# Adding a dependency on leak_check_disable will disable
+# sanitizer leak checking (asan/lsan) in a test without
+# the need to mess around with build features.
+cc_library(
+ name = "leak_check_disable",
+ srcs = ["leak_check_disable.cc"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ linkstatic = 1,
+ alwayslink = 1,
+)
+
+# These targets exist for use in tests only, explicitly configuring the
+# LEAK_SANITIZER macro. They must be linked with -fsanitize=leak for lsan.
+ABSL_LSAN_LINKOPTS = select({
+ "//absl:llvm_compiler": ["-fsanitize=leak"],
+ "//conditions:default": [],
+})
+
+cc_library(
+ name = "leak_check_api_enabled_for_testing",
+ testonly = 1,
+ srcs = ["leak_check.cc"],
+ hdrs = ["leak_check.h"],
+ copts = select({
+ "//absl:llvm_compiler": ["-DLEAK_SANITIZER"],
+ "//conditions:default": [],
+ }),
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+)
+
+cc_library(
+ name = "leak_check_api_disabled_for_testing",
+ testonly = 1,
+ srcs = ["leak_check.cc"],
+ hdrs = ["leak_check.h"],
+ copts = ["-ULEAK_SANITIZER"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+)
+
+cc_test(
+ name = "leak_check_test",
+ srcs = ["leak_check_test.cc"],
+ copts = select({
+ "//absl:llvm_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"],
+ "//conditions:default": [],
+ }),
+ linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
+ tags = ["notsan"],
+ deps = [
+ ":leak_check_api_enabled_for_testing",
+ "//absl/base",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "leak_check_no_lsan_test",
+ srcs = ["leak_check_test.cc"],
+ copts = ["-UABSL_EXPECT_LEAK_SANITIZER"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["noasan"],
+ deps = [
+ ":leak_check_api_disabled_for_testing",
+ "//absl/base", # for raw_logging
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+# Test that leak checking is skipped when lsan is enabled but
+# ":leak_check_disable" is linked in.
+#
+# This test should fail in the absence of a dependency on ":leak_check_disable".
+cc_test(
+ name = "disabled_leak_check_test",
+ srcs = ["leak_check_fail_test.cc"],
+ linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
+ tags = ["notsan"],
+ deps = [
+ ":leak_check_api_enabled_for_testing",
+ ":leak_check_disable",
+ "//absl/base",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "stack_consumption",
+ testonly = 1,
+ srcs = ["internal/stack_consumption.cc"],
+ hdrs = ["internal/stack_consumption.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+ deps = [
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ ],
+)
+
+cc_test(
+ name = "stack_consumption_test",
+ srcs = ["internal/stack_consumption_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":stack_consumption",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
diff --git a/absl/debugging/CMakeLists.txt b/absl/debugging/CMakeLists.txt
new file mode 100644
index 0000000..c409e33
--- /dev/null
+++ b/absl/debugging/CMakeLists.txt
@@ -0,0 +1,321 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+absl_cc_library(
+ NAME
+ stacktrace
+ HDRS
+ "stacktrace.h"
+ SRCS
+ "stacktrace.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::debugging_internal
+ absl::core_headers
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
+ symbolize
+ HDRS
+ "symbolize.h"
+ "internal/symbolize.h"
+ SRCS
+ "symbolize.cc"
+ "symbolize_elf.inc"
+ "symbolize_unimplemented.inc"
+ "symbolize_win32.inc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ LINKOPTS
+ ${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::debugging_internal
+ absl::demangle_internal
+ absl::base
+ absl::core_headers
+ absl::dynamic_annotations
+ absl::malloc_internal
+ absl::raw_logging_internal
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ symbolize_test
+ SRCS
+ "symbolize_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::stack_consumption
+ absl::symbolize
+ absl::base
+ absl::core_headers
+ absl::memory
+ absl::raw_logging_internal
+ gmock
+)
+
+absl_cc_library(
+ NAME
+ examine_stack
+ HDRS
+ "internal/examine_stack.h"
+ SRCS
+ "internal/examine_stack.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::stacktrace
+ absl::symbolize
+ absl::core_headers
+ absl::raw_logging_internal
+)
+
+absl_cc_library(
+ NAME
+ failure_signal_handler
+ HDRS
+ "failure_signal_handler.h"
+ SRCS
+ "failure_signal_handler.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::examine_stack
+ absl::stacktrace
+ absl::base
+ absl::config
+ absl::core_headers
+ absl::raw_logging_internal
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ failure_signal_handler_test
+ SRCS
+ "failure_signal_handler_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::failure_signal_handler
+ absl::stacktrace
+ absl::symbolize
+ absl::strings
+ absl::raw_logging_internal
+ Threads::Threads
+ gmock
+)
+
+absl_cc_library(
+ NAME
+ debugging_internal
+ HDRS
+ "internal/address_is_readable.h"
+ "internal/elf_mem_image.h"
+ "internal/stacktrace_aarch64-inl.inc"
+ "internal/stacktrace_arm-inl.inc"
+ "internal/stacktrace_config.h"
+ "internal/stacktrace_generic-inl.inc"
+ "internal/stacktrace_powerpc-inl.inc"
+ "internal/stacktrace_unimplemented-inl.inc"
+ "internal/stacktrace_win32-inl.inc"
+ "internal/stacktrace_x86-inl.inc"
+ "internal/vdso_support.h"
+ SRCS
+ "internal/address_is_readable.cc"
+ "internal/elf_mem_image.cc"
+ "internal/vdso_support.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::core_headers
+ absl::dynamic_annotations
+ absl::raw_logging_internal
+)
+
+absl_cc_library(
+ NAME
+ demangle_internal
+ HDRS
+ "internal/demangle.h"
+ SRCS
+ "internal/demangle.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::base
+ absl::core_headers
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ demangle_test
+ SRCS
+ "internal/demangle_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::demangle_internal
+ absl::stack_consumption
+ absl::core_headers
+ absl::memory
+ absl::raw_logging_internal
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ leak_check
+ HDRS
+ "leak_check.h"
+ SRCS
+ "leak_check.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::core_headers
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
+ leak_check_disable
+ SRCS
+ "leak_check_disable.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
+ leak_check_api_enabled_for_testing
+ HDRS
+ "leak_check.h"
+ SRCS
+ "leak_check.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ $<$<BOOL:${ABSL_HAVE_LSAN}>:-DLEAK_SANITIZER>
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ leak_check_api_disabled_for_testing
+ HDRS
+ "leak_check.h"
+ SRCS
+ "leak_check.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ "-ULEAK_SANITIZER"
+ TESTONLY
+)
+
+absl_cc_test(
+ NAME
+ leak_check_test
+ SRCS
+ "leak_check_test.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ "$<$<BOOL:${ABSL_HAVE_LSAN}>:-DABSL_EXPECT_LEAK_SANITIZER>"
+ LINKOPTS
+ "${ABSL_LSAN_LINKOPTS}"
+ DEPS
+ absl::leak_check_api_enabled_for_testing
+ absl::base
+ gmock_main
+)
+
+absl_cc_test(
+ NAME
+ leak_check_no_lsan_test
+ SRCS
+ "leak_check_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ "-UABSL_EXPECT_LEAK_SANITIZER"
+ DEPS
+ absl::leak_check_api_disabled_for_testing
+ absl::base
+ gmock_main
+)
+
+absl_cc_test(
+ NAME
+ disabled_leak_check_test
+ SRCS
+ "leak_check_fail_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ LINKOPTS
+ "${ABSL_LSAN_LINKOPTS}"
+ DEPS
+ absl::leak_check_api_enabled_for_testing
+ absl::leak_check_disable
+ absl::base
+ absl::raw_logging_internal
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ stack_consumption
+ HDRS
+ "internal/stack_consumption.h"
+ SRCS
+ "internal/stack_consumption.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::core_headers
+ absl::raw_logging_internal
+ TESTONLY
+)
+
+absl_cc_test(
+ NAME
+ stack_consumption_test
+ SRCS
+ "internal/stack_consumption_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::stack_consumption
+ absl::core_headers
+ absl::raw_logging_internal
+ gmock_main
+)
+
+# component target
+absl_cc_library(
+ NAME
+ debugging
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::stacktrace
+ absl::leak_check
+ PUBLIC
+)
diff --git a/absl/debugging/failure_signal_handler.cc b/absl/debugging/failure_signal_handler.cc
new file mode 100644
index 0000000..c6a4d96
--- /dev/null
+++ b/absl/debugging/failure_signal_handler.cc
@@ -0,0 +1,359 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "absl/debugging/failure_signal_handler.h"
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+
+#ifdef ABSL_HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <csignal>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/debugging/internal/examine_stack.h"
+#include "absl/debugging/stacktrace.h"
+
+#ifndef _WIN32
+#define ABSL_HAVE_SIGACTION
+#endif
+
+namespace absl {
+
+ABSL_CONST_INIT static FailureSignalHandlerOptions fsh_options;
+
+// Resets the signal handler for signo to the default action for that
+// signal, then raises the signal.
+static void RaiseToDefaultHandler(int signo) {
+ signal(signo, SIG_DFL);
+ raise(signo);
+}
+
+struct FailureSignalData {
+ const int signo;
+ const char* const as_string;
+#ifdef ABSL_HAVE_SIGACTION
+ struct sigaction previous_action;
+ // StructSigaction is used to silence -Wmissing-field-initializers.
+ using StructSigaction = struct sigaction;
+ #define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
+#else
+ void (*previous_handler)(int);
+ #define FSD_PREVIOUS_INIT SIG_DFL
+#endif
+};
+
+ABSL_CONST_INIT static FailureSignalData failure_signal_data[] = {
+ {SIGSEGV, "SIGSEGV", FSD_PREVIOUS_INIT},
+ {SIGILL, "SIGILL", FSD_PREVIOUS_INIT},
+ {SIGFPE, "SIGFPE", FSD_PREVIOUS_INIT},
+ {SIGABRT, "SIGABRT", FSD_PREVIOUS_INIT},
+ {SIGTERM, "SIGTERM", FSD_PREVIOUS_INIT},
+#ifndef _WIN32
+ {SIGBUS, "SIGBUS", FSD_PREVIOUS_INIT},
+ {SIGTRAP, "SIGTRAP", FSD_PREVIOUS_INIT},
+#endif
+};
+
+#undef FSD_PREVIOUS_INIT
+
+static void RaiseToPreviousHandler(int signo) {
+ // Search for the previous handler.
+ for (const auto& it : failure_signal_data) {
+ if (it.signo == signo) {
+#ifdef ABSL_HAVE_SIGACTION
+ sigaction(signo, &it.previous_action, nullptr);
+#else
+ signal(signo, it.previous_handler);
+#endif
+ raise(signo);
+ return;
+ }
+ }
+
+ // Not found, use the default handler.
+ RaiseToDefaultHandler(signo);
+}
+
+namespace debugging_internal {
+
+const char* FailureSignalToString(int signo) {
+ for (const auto& it : failure_signal_data) {
+ if (it.signo == signo) {
+ return it.as_string;
+ }
+ }
+ return "";
+}
+
+} // namespace debugging_internal
+
+#ifndef _WIN32
+
+static bool SetupAlternateStackOnce() {
+#if defined(__wasm__) || defined(__asmjs__)
+ const size_t page_mask = getpagesize() - 1;
+#else
+ const size_t page_mask = sysconf(_SC_PAGESIZE) - 1;
+#endif
+ size_t stack_size = (std::max(SIGSTKSZ, 65536) + page_mask) & ~page_mask;
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+ // Account for sanitizer instrumentation requiring additional stack space.
+ stack_size *= 5;
+#endif
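+ // Illustrative arithmetic (not from the original source): with 4 KiB pages,
+ // page_mask == 0xFFF, so the requested 65536 bytes is already page-aligned
+ // and stays 65536 (or 5 * 65536 == 327680 when a sanitizer is active).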
+
+ stack_t sigstk;
+ memset(&sigstk, 0, sizeof(sigstk));
+ sigstk.ss_size = stack_size;
+
+#ifdef ABSL_HAVE_MMAP
+#ifndef MAP_STACK
+#define MAP_STACK 0
+#endif
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+ sigstk.ss_sp = mmap(nullptr, sigstk.ss_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (sigstk.ss_sp == MAP_FAILED) {
+ ABSL_RAW_LOG(FATAL, "mmap() for alternate signal stack failed");
+ }
+#else
+ sigstk.ss_sp = malloc(sigstk.ss_size);
+ if (sigstk.ss_sp == nullptr) {
+ ABSL_RAW_LOG(FATAL, "malloc() for alternate signal stack failed");
+ }
+#endif
+
+ if (sigaltstack(&sigstk, nullptr) != 0) {
+ ABSL_RAW_LOG(FATAL, "sigaltstack() failed with errno=%d", errno);
+ }
+ return true;
+}
+
+#endif
+
+#ifdef ABSL_HAVE_SIGACTION
+
+// Sets up an alternate stack for signal handlers once.
+// Returns the appropriate flag for sig_action.sa_flags
+// if the system supports using an alternate stack.
+static int MaybeSetupAlternateStack() {
+#ifndef _WIN32
+ ABSL_ATTRIBUTE_UNUSED static const bool kOnce = SetupAlternateStackOnce();
+ return SA_ONSTACK;
+#else
+ return 0;
+#endif
+}
+
+static void InstallOneFailureHandler(FailureSignalData* data,
+ void (*handler)(int, siginfo_t*, void*)) {
+ struct sigaction act;
+ memset(&act, 0, sizeof(act));
+ sigemptyset(&act.sa_mask);
+ act.sa_flags |= SA_SIGINFO;
+ // SA_NODEFER is required to handle SIGABRT from
+ // ImmediateAbortSignalHandler().
+ act.sa_flags |= SA_NODEFER;
+ if (fsh_options.use_alternate_stack) {
+ act.sa_flags |= MaybeSetupAlternateStack();
+ }
+ act.sa_sigaction = handler;
+ ABSL_RAW_CHECK(sigaction(data->signo, &act, &data->previous_action) == 0,
+ "sigaction() failed");
+}
+
+#else
+
+static void InstallOneFailureHandler(FailureSignalData* data,
+ void (*handler)(int)) {
+ data->previous_handler = signal(data->signo, handler);
+ ABSL_RAW_CHECK(data->previous_handler != SIG_ERR, "signal() failed");
+}
+
+#endif
+
+static void WriteToStderr(const char* data) {
+ int old_errno = errno;
+ absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
+ errno = old_errno;
+}
+
+static void WriteSignalMessage(int signo, void (*writerfn)(const char*)) {
+ char buf[64];
+ const char* const signal_string =
+ debugging_internal::FailureSignalToString(signo);
+ if (signal_string != nullptr && signal_string[0] != '\0') {
+ snprintf(buf, sizeof(buf), "*** %s received at time=%ld ***\n",
+ signal_string,
+ static_cast<long>(time(nullptr))); // NOLINT(runtime/int)
+ } else {
+ snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld ***\n",
+ signo, static_cast<long>(time(nullptr))); // NOLINT(runtime/int)
+ }
+ writerfn(buf);
+}
+
+// `void*` might not be big enough to store `void(*)(const char*)`.
+struct WriterFnStruct {
+ void (*writerfn)(const char*);
+};
+
+// Many of the absl::debugging_internal::Dump* functions in
+// examine_stack.h take a writer function pointer that has a void* arg
+// for historical reasons. The failure signal handler's writerfn, by contrast,
+// only takes a data pointer. This function converts between these types.
+static void WriterFnWrapper(const char* data, void* arg) {
+ static_cast<WriterFnStruct*>(arg)->writerfn(data);
+}
+
+// Convenient wrapper around DumpPCAndFrameSizesAndStackTrace() for signal
+// handlers. "noinline" so that GetStackFrames() skips the top-most stack
+// frame for this function.
+ABSL_ATTRIBUTE_NOINLINE static void WriteStackTrace(
+ void* ucontext, bool symbolize_stacktrace,
+ void (*writerfn)(const char*, void*), void* writerfn_arg) {
+ constexpr int kNumStackFrames = 32;
+ void* stack[kNumStackFrames];
+ int frame_sizes[kNumStackFrames];
+ int min_dropped_frames;
+ int depth = absl::GetStackFramesWithContext(
+ stack, frame_sizes, kNumStackFrames,
+ 1, // Do not include this function in stack trace.
+ ucontext, &min_dropped_frames);
+ absl::debugging_internal::DumpPCAndFrameSizesAndStackTrace(
+ absl::debugging_internal::GetProgramCounter(ucontext), stack, frame_sizes,
+ depth, min_dropped_frames, symbolize_stacktrace, writerfn, writerfn_arg);
+}
+
+// Called by AbslFailureSignalHandler() to write the failure info. It is
+// called once with writerfn set to WriteToStderr() and then possibly
+// with writerfn set to the user provided function.
+static void WriteFailureInfo(int signo, void* ucontext,
+ void (*writerfn)(const char*)) {
+ WriterFnStruct writerfn_struct{writerfn};
+ WriteSignalMessage(signo, writerfn);
+ WriteStackTrace(ucontext, fsh_options.symbolize_stacktrace, WriterFnWrapper,
+ &writerfn_struct);
+}
+
+// absl::SleepFor() can't be used here since AbslInternalSleepFor()
+// may be overridden to do something that isn't async-signal-safe on
+// some platforms.
+static void PortableSleepForSeconds(int seconds) {
+#ifdef _WIN32
+ Sleep(seconds * 1000);
+#else
+ struct timespec sleep_time;
+ sleep_time.tv_sec = seconds;
+ sleep_time.tv_nsec = 0;
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {}
+#endif
+}
+
+#ifdef ABSL_HAVE_ALARM
+// AbslFailureSignalHandler() installs this as a signal handler for
+// SIGALRM, then sets an alarm to be delivered to the program after a
+// set amount of time. If AbslFailureSignalHandler() hangs for more than
+// the alarm timeout, ImmediateAbortSignalHandler() will abort the
+// program.
+static void ImmediateAbortSignalHandler(int) {
+ RaiseToDefaultHandler(SIGABRT);
+}
+#endif
+
+// absl::base_internal::GetTID() returns pid_t on most platforms, but
+// returns absl::base_internal::pid_t on Windows.
+using GetTidType = decltype(absl::base_internal::GetTID());
+ABSL_CONST_INIT static std::atomic<GetTidType> failed_tid(0);
+
+#ifndef ABSL_HAVE_SIGACTION
+static void AbslFailureSignalHandler(int signo) {
+ void* ucontext = nullptr;
+#else
+static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
+#endif
+
+ const GetTidType this_tid = absl::base_internal::GetTID();
+ GetTidType previous_failed_tid = 0;
+ if (!failed_tid.compare_exchange_strong(
+ previous_failed_tid, static_cast<intptr_t>(this_tid),
+ std::memory_order_acq_rel, std::memory_order_relaxed)) {
+ ABSL_RAW_LOG(
+ ERROR,
+ "Signal %d raised at PC=%p while already in AbslFailureSignalHandler()",
+ signo, absl::debugging_internal::GetProgramCounter(ucontext));
+ if (this_tid != previous_failed_tid) {
+ // Another thread is already in AbslFailureSignalHandler(), so wait
+ // a bit for it to finish. If the other thread doesn't kill us,
+ // we do so after sleeping.
+ PortableSleepForSeconds(3);
+ RaiseToDefaultHandler(signo);
+ // The recursively raised signal may be blocked until we return.
+ return;
+ }
+ }
+
+#ifdef ABSL_HAVE_ALARM
+ // Set an alarm to abort the program in case this code hangs or deadlocks.
+ if (fsh_options.alarm_on_failure_secs > 0) {
+ alarm(0); // Cancel any existing alarms.
+ signal(SIGALRM, ImmediateAbortSignalHandler);
+ alarm(fsh_options.alarm_on_failure_secs);
+ }
+#endif
+
+ // First write to stderr.
+ WriteFailureInfo(signo, ucontext, WriteToStderr);
+
+ // Riskier code (because it is less likely to be async-signal-safe)
+ // goes after this point.
+ if (fsh_options.writerfn != nullptr) {
+ WriteFailureInfo(signo, ucontext, fsh_options.writerfn);
+ }
+
+ if (fsh_options.call_previous_handler) {
+ RaiseToPreviousHandler(signo);
+ } else {
+ RaiseToDefaultHandler(signo);
+ }
+}
+
+void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options) {
+ fsh_options = options;
+ for (auto& it : failure_signal_data) {
+ InstallOneFailureHandler(&it, AbslFailureSignalHandler);
+ }
+}
+
+} // namespace absl
diff --git a/absl/debugging/failure_signal_handler.h b/absl/debugging/failure_signal_handler.h
new file mode 100644
index 0000000..1beb78b
--- /dev/null
+++ b/absl/debugging/failure_signal_handler.h
@@ -0,0 +1,117 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: failure_signal_handler.h
+// -----------------------------------------------------------------------------
+//
+// This file configures the Abseil *failure signal handler* to capture and dump
+// useful debugging information (such as a stacktrace) upon program failure.
+//
+// To use the failure signal handler, call `absl::InstallFailureSignalHandler()`
+// very early in your program, usually in the first few lines of main():
+//
+// int main(int argc, char** argv) {
+// // Initialize the symbolizer to get a human-readable stack trace
+// absl::InitializeSymbolizer(argv[0]);
+//
+// absl::FailureSignalHandlerOptions options;
+// absl::InstallFailureSignalHandler(options);
+// DoSomethingInteresting();
+// return 0;
+// }
+//
+// Any program that raises a fatal signal (such as `SIGSEGV`, `SIGILL`,
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP`) will call the
+// installed failure signal handler and provide debugging information to stderr.
+//
+// Note that you should *not* install the Abseil failure signal handler more
+// than once. You may, of course, have another (non-Abseil) failure signal
+// handler installed (which will be triggered after Abseil's handler if the
+// `call_previous_handler` option is set to `true`).
+
+#ifndef ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
+#define ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
+
+namespace absl {
+
+// FailureSignalHandlerOptions
+//
+// Struct for holding `absl::InstallFailureSignalHandler()` configuration
+// options.
+struct FailureSignalHandlerOptions {
+ // If true, try to symbolize the stacktrace emitted on failure, provided that
+ // you have initialized a symbolizer for that purpose. (See symbolize.h for
+ // more information.)
+ bool symbolize_stacktrace = true;
+
+ // If true, try to run signal handlers on an alternate stack (if supported on
+ // the given platform). An alternate stack is useful for program crashes due
+// to a stack overflow; by running on an alternate stack, the signal handler
+// may run even when normal stack space has been exhausted. The downside of
+ // using an alternate stack is that extra memory for the alternate stack needs
+ // to be pre-allocated.
+ bool use_alternate_stack = true;
+
+ // If positive, an alarm is set when the failure signal handler starts so the
+ // program is aborted after this many seconds. Setting such an alarm is useful
+ // in cases where the failure signal handler itself may become hung or
+ // deadlocked.
+ int alarm_on_failure_secs = 3;
+
+ // If true, call the previously registered signal handler for the signal that
+ // was received (if one was registered) after the existing signal handler
+ // runs. This mechanism can be used to chain signal handlers together.
+ //
+ // If false, the signal is raised to the default handler for that signal
+ // (which normally terminates the program).
+ //
+ // IMPORTANT: If true, the chained fatal signal handlers must not try to
+ // recover from the fatal signal. Instead, they should terminate the program
+ // via some mechanism, like raising the default handler for the signal, or by
+ // calling `_exit()`. Note that the failure signal handler may put parts of
+ // the Abseil library into a state from which they cannot recover.
+ bool call_previous_handler = false;
+
+ // If non-null, indicates a pointer to a callback function that will be called
+ // upon failure, with a null-terminated string argument containing failure
+ // data. This function may be used as a hook to write failure data to a
+ // secondary location, such
+ // as a log file. This function may also be called with null data, as a hint
+ // to flush any buffered data before the program may be terminated. Consider
+ // flushing any buffered data in all calls to this function.
+ //
+ // Since this function runs within a signal handler, it should be
+ // async-signal-safe if possible.
+ // See http://man7.org/linux/man-pages/man7/signal-safety.7.html
+ void (*writerfn)(const char*) = nullptr;
+};
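+
+// Illustrative sketch (not part of the Abseil documentation above): wiring a
+// custom `writerfn` into these options. The `WriteToLog` name is hypothetical.
+//
+//   void WriteToLog(const char* data) {
+//     if (data != nullptr) {
+//       fputs(data, stderr);  // or write to a pre-opened log file
+//     }
+//     fflush(stderr);
+//   }
+//
+//   absl::FailureSignalHandlerOptions options;
+//   options.writerfn = WriteToLog;
+//   options.call_previous_handler = true;
+//   absl::InstallFailureSignalHandler(options);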
+
+// InstallFailureSignalHandler()
+//
+// Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`,
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP` (provided they exist
+// on the given platform). The failure signal handler dumps program failure data
+// useful for debugging in an unspecified format to stderr. This data may
+// include the program counter, a stacktrace, and register information on some
+// systems; do not rely on an exact format for the output, as it is subject to
+// change.
+void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options);
+
+namespace debugging_internal {
+const char* FailureSignalToString(int signo);
+} // namespace debugging_internal
+
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
diff --git a/absl/debugging/failure_signal_handler_test.cc b/absl/debugging/failure_signal_handler_test.cc
new file mode 100644
index 0000000..bb2cc48
--- /dev/null
+++ b/absl/debugging/failure_signal_handler_test.cc
@@ -0,0 +1,156 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "absl/debugging/failure_signal_handler.h"
+
+#include <csignal>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/symbolize.h"
+#include "absl/strings/match.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+
+#if GTEST_HAS_DEATH_TEST
+
+// For the parameterized death tests. GetParam() returns the signal number.
+using FailureSignalHandlerDeathTest = ::testing::TestWithParam<int>;
+
+// This function runs in a fork()ed process on most systems.
+void InstallHandlerAndRaise(int signo) {
+ absl::InstallFailureSignalHandler(absl::FailureSignalHandlerOptions());
+ raise(signo);
+}
+
+TEST_P(FailureSignalHandlerDeathTest, AbslFailureSignal) {
+ const int signo = GetParam();
+ std::string exit_regex = absl::StrCat(
+ "\\*\\*\\* ", absl::debugging_internal::FailureSignalToString(signo),
+ " received at time=");
+#ifndef _WIN32
+ EXPECT_EXIT(InstallHandlerAndRaise(signo), testing::KilledBySignal(signo),
+ exit_regex);
+#else
+ // Windows doesn't have testing::KilledBySignal().
+ EXPECT_DEATH(InstallHandlerAndRaise(signo), exit_regex);
+#endif
+}
+
+ABSL_CONST_INIT FILE* error_file = nullptr;
+
+void WriteToErrorFile(const char* msg) {
+ if (msg != nullptr) {
+ ABSL_RAW_CHECK(fwrite(msg, strlen(msg), 1, error_file) == 1,
+ "fwrite() failed");
+ }
+ ABSL_RAW_CHECK(fflush(error_file) == 0, "fflush() failed");
+}
+
+std::string GetTmpDir() {
+ // TEST_TMPDIR is set by Bazel. Try the others when not running under Bazel.
+ static const char* const kTmpEnvVars[] = {"TEST_TMPDIR", "TMPDIR", "TEMP",
+ "TEMPDIR", "TMP"};
+ for (const char* const var : kTmpEnvVars) {
+ const char* tmp_dir = std::getenv(var);
+ if (tmp_dir != nullptr) {
+ return tmp_dir;
+ }
+ }
+
+ // Try something reasonable.
+ return "/tmp";
+}
+
+// This function runs in a fork()ed process on most systems.
+void InstallHandlerWithWriteToFileAndRaise(const char* file, int signo) {
+ error_file = fopen(file, "w");
+ ABSL_RAW_CHECK(error_file != nullptr, "Failed to create error_file");
+ absl::FailureSignalHandlerOptions options;
+ options.writerfn = WriteToErrorFile;
+ absl::InstallFailureSignalHandler(options);
+ raise(signo);
+}
+
+TEST_P(FailureSignalHandlerDeathTest, AbslFatalSignalsWithWriterFn) {
+ const int signo = GetParam();
+ std::string tmp_dir = GetTmpDir();
+ std::string file = absl::StrCat(tmp_dir, "/signo_", signo);
+
+ std::string exit_regex = absl::StrCat(
+ "\\*\\*\\* ", absl::debugging_internal::FailureSignalToString(signo),
+ " received at time=");
+#ifndef _WIN32
+ EXPECT_EXIT(InstallHandlerWithWriteToFileAndRaise(file.c_str(), signo),
+ testing::KilledBySignal(signo), exit_regex);
+#else
+ // Windows doesn't have testing::KilledBySignal().
+ EXPECT_DEATH(InstallHandlerWithWriteToFileAndRaise(file.c_str(), signo),
+ exit_regex);
+#endif
+
+ // Open the file in this process and check its contents.
+ std::fstream error_output(file);
+ ASSERT_TRUE(error_output.is_open()) << file;
+ std::string error_line;
+ std::getline(error_output, error_line);
+ EXPECT_TRUE(absl::StartsWith(
+ error_line,
+ absl::StrCat("*** ",
+ absl::debugging_internal::FailureSignalToString(signo),
+ " received at ")));
+
+ if (absl::debugging_internal::StackTraceWorksForTest()) {
+ std::getline(error_output, error_line);
+ EXPECT_TRUE(absl::StartsWith(error_line, "PC: "));
+ }
+}
+
+constexpr int kFailureSignals[] = {
+ SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGTERM,
+#ifndef _WIN32
+ SIGBUS, SIGTRAP,
+#endif
+};
+
+std::string SignalParamToString(const ::testing::TestParamInfo<int>& info) {
+ std::string result =
+ absl::debugging_internal::FailureSignalToString(info.param);
+ if (result.empty()) {
+ result = absl::StrCat(info.param);
+ }
+ return result;
+}
+
+INSTANTIATE_TEST_SUITE_P(AbslDeathTest, FailureSignalHandlerDeathTest,
+ ::testing::ValuesIn(kFailureSignals),
+ SignalParamToString);
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace
+
+int main(int argc, char** argv) {
+ absl::InitializeSymbolizer(argv[0]);
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/absl/debugging/internal/address_is_readable.cc b/absl/debugging/internal/address_is_readable.cc
new file mode 100644
index 0000000..99c4c64
--- /dev/null
+++ b/absl/debugging/internal/address_is_readable.cc
@@ -0,0 +1,133 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// absl::debugging_internal::AddressIsReadable() probes an address to see
+// whether it is readable, without faulting.
+
+#include "absl/debugging/internal/address_is_readable.h"
+
+#if !defined(__linux__) || defined(__ANDROID__)
+
+namespace absl {
+namespace debugging_internal {
+
+// On platforms other than Linux, just return true.
+bool AddressIsReadable(const void* /* addr */) { return true; }
+
+} // namespace debugging_internal
+} // namespace absl
+
+#else
+
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <atomic>
+#include <cerrno>
+#include <cstdint>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+namespace debugging_internal {
+
+// Pack a pid and two file descriptors into a 64-bit word,
+// using 16, 24, and 24 bits for each respectively.
+static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
+ ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
+ "fd out of range");
+ return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
+}
+
+// Unpack x into a pid and two file descriptors, where x was created with
+// Pack().
+static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
+ *pid = x >> 48;
+ *read_fd = (x >> 24) & 0xffffff;
+ *write_fd = x & 0xffffff;
+}
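+
+// Worked example (illustrative only): Pack(0x1234, 3, 4) yields
+// 0x1234000003000004, and Unpack() of that value recovers pid == 0x1234,
+// read_fd == 3, and write_fd == 4.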
+
+// Returns whether the byte at *addr is readable, without faulting.
+// Saves and restores errno. Returns true on systems where the check is
+// unimplemented.
+// pid_and_fds is namespace-scoped for correct zero-initialization.
+static std::atomic<uint64_t> pid_and_fds; // initially 0, an invalid pid.
+bool AddressIsReadable(const void *addr) {
+ int save_errno = errno;
+ // We test whether a byte is readable by using write(). Normally, this would
+ // be done via a cached file descriptor to /dev/null, but linux fails to
+ // check whether the byte is readable when the destination is /dev/null, so
+ // we use a cached pipe. We store the pid of the process that created the
+ // pipe to handle the case where a process forks, and the child closes all
+ // the file descriptors and then calls this routine. This is not perfect:
+ // the child could use the routine, then close all file descriptors and then
+ // use this routine again. But the likely use of this routine is when
+ // crashing, to test the validity of pages when dumping the stack. Beware
+ // that we may leak file descriptors, but we're unlikely to leak many.
+ int bytes_written;
+ int current_pid = getpid() & 0xffff; // we use only the low order 16 bits
+ do { // until we do not get EBADF trying to use file descriptors
+ int pid;
+ int read_fd;
+ int write_fd;
+ uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
+ Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+ while (current_pid != pid) {
+ int p[2];
+ // new pipe
+ if (pipe(p) != 0) {
+ ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
+ }
+ fcntl(p[0], F_SETFD, FD_CLOEXEC);
+ fcntl(p[1], F_SETFD, FD_CLOEXEC);
+ uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
+ if (pid_and_fds.compare_exchange_strong(
+ local_pid_and_fds, new_pid_and_fds, std::memory_order_relaxed,
+ std::memory_order_relaxed)) {
+ local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads
+ } else { // fds not exposed to other threads; we can close them.
+ close(p[0]);
+ close(p[1]);
+ local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
+ }
+ Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+ }
+ errno = 0;
+ // Use syscall(SYS_write, ...) instead of write() to prevent ASAN
+ // and other checkers from complaining about accesses to arbitrary
+ // memory.
+ do {
+ bytes_written = syscall(SYS_write, write_fd, addr, 1);
+ } while (bytes_written == -1 && errno == EINTR);
+ if (bytes_written == 1) { // remove the byte from the pipe
+ char c;
+ while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
+ }
+ }
+ if (errno == EBADF) { // Descriptors invalid.
+ // If pid_and_fds contains the problematic file descriptors we just used,
+ // this call will forget them, and the loop will try again.
+ pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
+ }
+ } while (errno == EBADF);
+ errno = save_errno;
+ return bytes_written == 1;
+}
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif
diff --git a/absl/debugging/internal/address_is_readable.h b/absl/debugging/internal/address_is_readable.h
new file mode 100644
index 0000000..ca8003e
--- /dev/null
+++ b/absl/debugging/internal/address_is_readable.h
@@ -0,0 +1,28 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+#define ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+
+namespace absl {
+namespace debugging_internal {
+
+// Returns whether the byte at *addr is readable, without faulting.
+// Saves and restores errno.
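+//
+// Illustrative use (a sketch, not from this library): checking a candidate
+// frame pointer before dereferencing it in a stack walker. The `fp` and
+// `next_fp` names are hypothetical.
+//
+//   if (absl::debugging_internal::AddressIsReadable(fp)) {
+//     next_fp = *reinterpret_cast<void* const*>(fp);
+//   }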
+bool AddressIsReadable(const void *addr);
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
diff --git a/absl/debugging/internal/demangle.cc b/absl/debugging/internal/demangle.cc
new file mode 100644
index 0000000..52a553f
--- /dev/null
+++ b/absl/debugging/internal/demangle.cc
@@ -0,0 +1,1877 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// For reference check out:
+// https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
+//
+// Note that we currently have only partial C++11 support.
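+//
+// Illustrative examples (not from the original comments): the Itanium-mangled
+// name "_Z3foov" demangles to "foo()", and "_ZN3Foo3BarEv" demangles to
+// "Foo::Bar()".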
+
+#include "absl/debugging/internal/demangle.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <limits>
+
+namespace absl {
+namespace debugging_internal {
+
+typedef struct {
+ const char *abbrev;
+ const char *real_name;
+ // Number of arguments in <expression> context, or 0 if disallowed.
+ int arity;
+} AbbrevPair;
+
+// List of operators from Itanium C++ ABI.
+static const AbbrevPair kOperatorList[] = {
+ // New has special syntax (not currently supported).
+ {"nw", "new", 0},
+ {"na", "new[]", 0},
+
+ // Works except that the 'gs' prefix is not supported.
+ {"dl", "delete", 1},
+ {"da", "delete[]", 1},
+
+ {"ps", "+", 1}, // "positive"
+ {"ng", "-", 1}, // "negative"
+ {"ad", "&", 1}, // "address-of"
+ {"de", "*", 1}, // "dereference"
+ {"co", "~", 1},
+
+ {"pl", "+", 2},
+ {"mi", "-", 2},
+ {"ml", "*", 2},
+ {"dv", "/", 2},
+ {"rm", "%", 2},
+ {"an", "&", 2},
+ {"or", "|", 2},
+ {"eo", "^", 2},
+ {"aS", "=", 2},
+ {"pL", "+=", 2},
+ {"mI", "-=", 2},
+ {"mL", "*=", 2},
+ {"dV", "/=", 2},
+ {"rM", "%=", 2},
+ {"aN", "&=", 2},
+ {"oR", "|=", 2},
+ {"eO", "^=", 2},
+ {"ls", "<<", 2},
+ {"rs", ">>", 2},
+ {"lS", "<<=", 2},
+ {"rS", ">>=", 2},
+ {"eq", "==", 2},
+ {"ne", "!=", 2},
+ {"lt", "<", 2},
+ {"gt", ">", 2},
+ {"le", "<=", 2},
+ {"ge", ">=", 2},
+ {"nt", "!", 1},
+ {"aa", "&&", 2},
+ {"oo", "||", 2},
+ {"pp", "++", 1},
+ {"mm", "--", 1},
+ {"cm", ",", 2},
+ {"pm", "->*", 2},
+ {"pt", "->", 0}, // Special syntax
+ {"cl", "()", 0}, // Special syntax
+ {"ix", "[]", 2},
+ {"qu", "?", 3},
+ {"st", "sizeof", 0}, // Special syntax
+ {"sz", "sizeof", 1}, // Not a real operator name, but used in expressions.
+ {nullptr, nullptr, 0},
+};
+
+// List of builtin types from Itanium C++ ABI.
+static const AbbrevPair kBuiltinTypeList[] = {
+ {"v", "void", 0},
+ {"w", "wchar_t", 0},
+ {"b", "bool", 0},
+ {"c", "char", 0},
+ {"a", "signed char", 0},
+ {"h", "unsigned char", 0},
+ {"s", "short", 0},
+ {"t", "unsigned short", 0},
+ {"i", "int", 0},
+ {"j", "unsigned int", 0},
+ {"l", "long", 0},
+ {"m", "unsigned long", 0},
+ {"x", "long long", 0},
+ {"y", "unsigned long long", 0},
+ {"n", "__int128", 0},
+ {"o", "unsigned __int128", 0},
+ {"f", "float", 0},
+ {"d", "double", 0},
+ {"e", "long double", 0},
+ {"g", "__float128", 0},
+ {"z", "ellipsis", 0},
+ {nullptr, nullptr, 0},
+};
+
+// List of substitutions from the Itanium C++ ABI.
+static const AbbrevPair kSubstitutionList[] = {
+ {"St", "", 0},
+ {"Sa", "allocator", 0},
+ {"Sb", "basic_string", 0},
+ // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
+ {"Ss", "string", 0},
+ // std::basic_istream<char, std::char_traits<char> >
+ {"Si", "istream", 0},
+ // std::basic_ostream<char, std::char_traits<char> >
+ {"So", "ostream", 0},
+ // std::basic_iostream<char, std::char_traits<char> >
+ {"Sd", "iostream", 0},
+ {nullptr, nullptr, 0},
+};
+
+// State needed for demangling. This struct is copied in almost every stack
+// frame, so every byte counts.
+typedef struct {
+ int mangled_idx; // Cursor of mangled name.
+ int out_cur_idx; // Cursor of output string.
+ int prev_name_idx; // For constructors/destructors.
+ signed int prev_name_length : 16; // For constructors/destructors.
+ signed int nest_level : 15; // For nested names.
+ unsigned int append : 1; // Append flag.
+ // Note: for some reason MSVC can't pack "bool append : 1" into the same int
+ // with the above two fields, so we use an int instead. Amusingly it can pack
+ // "signed bool" as expected, but relying on that to continue to be a legal
+ // type seems ill-advised (as it's illegal in at least clang).
+} ParseState;
+
+static_assert(sizeof(ParseState) == 4 * sizeof(int),
+ "unexpected size of ParseState");
+
+// One-off state for demangling that's not subject to backtracking -- either
+// constant data, data that's intentionally immune to backtracking (steps), or
+// data that would never be changed by backtracking anyway (recursion_depth).
+//
+// Only one copy of this exists for each call to Demangle, so the size of this
+// struct is nearly inconsequential.
+typedef struct {
+ const char *mangled_begin; // Beginning of input string.
+ char *out; // Beginning of output buffer.
+ int out_end_idx; // One past last allowed output character.
+ int recursion_depth; // For stack exhaustion prevention.
+ int steps; // Cap how much work we'll do, regardless of depth.
+ ParseState parse_state; // Backtrackable state copied for most frames.
+} State;
+
+namespace {
+// Prevent deep recursion / stack exhaustion.
+// Also prevent unbounded handling of complex inputs.
+class ComplexityGuard {
+ public:
+ explicit ComplexityGuard(State *state) : state_(state) {
+ ++state->recursion_depth;
+ ++state->steps;
+ }
+ ~ComplexityGuard() { --state_->recursion_depth; }
+
+ // 256 levels of recursion seems like a reasonable upper limit on depth.
+ // 128 is not enough to demangle synthetic tests from demangle_unittest.txt:
+ // "_ZaaZZZZ..." and "_ZaaZcvZcvZ..."
+ static constexpr int kRecursionDepthLimit = 256;
+
+ // We're trying to pick a charitable upper limit on how many parse steps are
+ // necessary to handle something that a human could actually make use of.
+ // This is mostly in place as a bound on how much work we'll do if we are
+ // asked to demangle a mangled name from an untrusted source, so it should be
+ // much larger than the largest expected symbol, but much smaller than the
+ // amount of work we can do in, e.g., a second.
+ //
+ // Some real-world symbols from an arbitrary binary started failing between
+ // 2^12 and 2^13, so we multiply the latter by an extra factor of 16 to set
+ // the limit.
+ //
+ // Spending one second on 2^17 parse steps would require each step to take
+ // 7.6us, or ~30000 clock cycles, so it's safe to say this can be done in
+ // under a second.
+ static constexpr int kParseStepsLimit = 1 << 17;
+
+ bool IsTooComplex() const {
+ return state_->recursion_depth > kRecursionDepthLimit ||
+ state_->steps > kParseStepsLimit;
+ }
+
+ private:
+ State *state_;
+};
+} // namespace
+
+// We don't use strlen() in libc since it's not guaranteed to be async
+// signal safe.
+static size_t StrLen(const char *str) {
+ size_t len = 0;
+ while (*str != '\0') {
+ ++str;
+ ++len;
+ }
+ return len;
+}
+
+// Returns true if "str" has at least "n" characters remaining.
+static bool AtLeastNumCharsRemaining(const char *str, int n) {
+ for (int i = 0; i < n; ++i) {
+ if (str[i] == '\0') {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if "str" has "prefix" as a prefix.
+static bool StrPrefix(const char *str, const char *prefix) {
+ size_t i = 0;
+ while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
+ ++i;
+ }
+ return prefix[i] == '\0'; // Consumed everything in "prefix".
+}
+
+static void InitState(State *state, const char *mangled, char *out,
+ int out_size) {
+ state->mangled_begin = mangled;
+ state->out = out;
+ state->out_end_idx = out_size;
+ state->recursion_depth = 0;
+ state->steps = 0;
+
+ state->parse_state.mangled_idx = 0;
+ state->parse_state.out_cur_idx = 0;
+ state->parse_state.prev_name_idx = 0;
+ state->parse_state.prev_name_length = -1;
+ state->parse_state.nest_level = -1;
+ state->parse_state.append = true;
+}
+
+static inline const char *RemainingInput(State *state) {
+ return &state->mangled_begin[state->parse_state.mangled_idx];
+}
+
+// Returns true and advances "mangled_idx" if we find "one_char_token"
+// at "mangled_idx" position. It is assumed that "one_char_token" does
+// not contain '\0'.
+static bool ParseOneCharToken(State *state, const char one_char_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (RemainingInput(state)[0] == one_char_token) {
+ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ return false;
+}
+
+// Returns true and advances "mangled_idx" if we find "two_char_token"
+// at "mangled_idx" position. It is assumed that "two_char_token" does
+// not contain '\0'.
+static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (RemainingInput(state)[0] == two_char_token[0] &&
+ RemainingInput(state)[1] == two_char_token[1]) {
+ state->parse_state.mangled_idx += 2;
+ return true;
+ }
+ return false;
+}
+
+// Returns true and advances "mangled_idx" if we find any character in
+// "char_class" at "mangled_idx" position.
+static bool ParseCharClass(State *state, const char *char_class) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (RemainingInput(state)[0] == '\0') {
+ return false;
+ }
+ const char *p = char_class;
+ for (; *p != '\0'; ++p) {
+ if (RemainingInput(state)[0] == *p) {
+ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool ParseDigit(State *state, int *digit) {
+ char c = RemainingInput(state)[0];
+ if (ParseCharClass(state, "0123456789")) {
+ if (digit != nullptr) {
+ *digit = c - '0';
+ }
+ return true;
+ }
+ return false;
+}
+
+// This function is used for handling an optional non-terminal.
+static bool Optional(bool /*status*/) { return true; }
+
+// This function is used for handling <non-terminal>+ syntax.
+typedef bool (*ParseFunc)(State *);
+static bool OneOrMore(ParseFunc parse_func, State *state) {
+ if (parse_func(state)) {
+ while (parse_func(state)) {
+ }
+ return true;
+ }
+ return false;
+}
+
+// This function is used for handling <non-terminal>* syntax. The function
+// always returns true and must be followed by a termination token or a
+// terminating sequence not handled by parse_func (e.g.
+// ParseOneCharToken(state, 'E')).
+static bool ZeroOrMore(ParseFunc parse_func, State *state) {
+ while (parse_func(state)) {
+ }
+ return true;
+}
+
+// Append "str" at "out_cur_idx". If there is an overflow, out_cur_idx is
+// set to out_end_idx+1. The output string is ensured to
+// always terminate with '\0' as long as there is no overflow.
+static void Append(State *state, const char *const str, const int length) {
+ for (int i = 0; i < length; ++i) {
+ if (state->parse_state.out_cur_idx + 1 <
+ state->out_end_idx) { // +1 for '\0'
+ state->out[state->parse_state.out_cur_idx++] = str[i];
+ } else {
+ // signal overflow
+ state->parse_state.out_cur_idx = state->out_end_idx + 1;
+ break;
+ }
+ }
+ if (state->parse_state.out_cur_idx < state->out_end_idx) {
+ state->out[state->parse_state.out_cur_idx] =
+ '\0'; // Terminate it with '\0'
+ }
+}
+
+// We don't use equivalents in libc to avoid locale issues.
+static bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
+
+static bool IsAlpha(char c) {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+static bool IsDigit(char c) { return c >= '0' && c <= '9'; }
+
+// Returns true if "str" is a function clone suffix. These suffixes are used
+// by GCC 4.5.x and later versions (and our locally-modified version of GCC
+// 4.4.x) to indicate functions which have been cloned during optimization.
+// We treat any sequence (.<alpha>+.<digit>+)+ as a function clone suffix.
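+// For example (illustrative), GCC-emitted suffixes such as ".isra.3" or
+// ".constprop.12.isra.5" match this pattern.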
+static bool IsFunctionCloneSuffix(const char *str) {
+ size_t i = 0;
+ while (str[i] != '\0') {
+ // Consume a single .<alpha>+.<digit>+ sequence.
+ if (str[i] != '.' || !IsAlpha(str[i + 1])) {
+ return false;
+ }
+ i += 2;
+ while (IsAlpha(str[i])) {
+ ++i;
+ }
+ if (str[i] != '.' || !IsDigit(str[i + 1])) {
+ return false;
+ }
+ i += 2;
+ while (IsDigit(str[i])) {
+ ++i;
+ }
+ }
+ return true; // Consumed everything in "str".
+}
+
+static bool EndsWith(State *state, const char chr) {
+ return state->parse_state.out_cur_idx > 0 &&
+ chr == state->out[state->parse_state.out_cur_idx - 1];
+}
+
+// Append "str" with some tweaks, iff "append" state is true.
+static void MaybeAppendWithLength(State *state, const char *const str,
+ const int length) {
+ if (state->parse_state.append && length > 0) {
+ // Append a space if the output buffer ends with '<' and "str"
+ // starts with '<' to avoid <<<.
+ if (str[0] == '<' && EndsWith(state, '<')) {
+ Append(state, " ", 1);
+ }
+ // Remember the last identifier name for ctors/dtors.
+ if (IsAlpha(str[0]) || str[0] == '_') {
+ state->parse_state.prev_name_idx = state->parse_state.out_cur_idx;
+ state->parse_state.prev_name_length = length;
+ }
+ Append(state, str, length);
+ }
+}
+
+// Appends a non-negative decimal number to the output if appending is enabled.
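+// Worked example (illustrative): MaybeAppendDecimal(state, 305) appends "305";
+// a val of 0 appends "0".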
+static bool MaybeAppendDecimal(State *state, unsigned int val) {
+ // A {32,64}-bit unsigned int has at most 20 decimal digits.
+ constexpr size_t kMaxLength = 20;
+ char buf[kMaxLength];
+
+ // We can't use itoa or sprintf as neither is specified to be
+ // async-signal-safe.
+ if (state->parse_state.append) {
+ // We can't have a one-before-the-beginning pointer, so instead start with
+ // one-past-the-end and manipulate one character before the pointer.
+ char *p = &buf[kMaxLength];
+ do { // val=0 is the only input that should write a leading zero digit.
+ *--p = (val % 10) + '0';
+ val /= 10;
+ } while (p > buf && val != 0);
+
+ // 'p' landed on the last character we set. How convenient.
+ Append(state, p, kMaxLength - (p - buf));
+ }
+
+ return true;
+}
+
+// A convenient wrapper around MaybeAppendWithLength().
+// Returns true so that it can be placed in "if" conditions.
+static bool MaybeAppend(State *state, const char *const str) {
+ if (state->parse_state.append) {
+ int length = StrLen(str);
+ MaybeAppendWithLength(state, str, length);
+ }
+ return true;
+}
+
+// This function is used for handling nested names.
+static bool EnterNestedName(State *state) {
+ state->parse_state.nest_level = 0;
+ return true;
+}
+
+// This function is used for handling nested names.
+static bool LeaveNestedName(State *state, int16_t prev_value) {
+ state->parse_state.nest_level = prev_value;
+ return true;
+}
+
+// Disables the append mode so that function parameters, etc. are not printed.
+static bool DisableAppend(State *state) {
+ state->parse_state.append = false;
+ return true;
+}
+
+// Restore the append mode to the previous state.
+static bool RestoreAppend(State *state, bool prev_value) {
+ state->parse_state.append = prev_value;
+ return true;
+}
+
+// Increase the nest level for nested names.
+static void MaybeIncreaseNestLevel(State *state) {
+ if (state->parse_state.nest_level > -1) {
+ ++state->parse_state.nest_level;
+ }
+}
+
+// Appends :: for nested names if necessary.
+static void MaybeAppendSeparator(State *state) {
+ if (state->parse_state.nest_level >= 1) {
+ MaybeAppend(state, "::");
+ }
+}
+
+// Cancel the last separator if necessary.
+static void MaybeCancelLastSeparator(State *state) {
+ if (state->parse_state.nest_level >= 1 && state->parse_state.append &&
+ state->parse_state.out_cur_idx >= 2) {
+ state->parse_state.out_cur_idx -= 2;
+ state->out[state->parse_state.out_cur_idx] = '\0';
+ }
+}
+
+// Returns true if the identifier of the given length at the current
+// position in the mangled input names an anonymous namespace.
+static bool IdentifierIsAnonymousNamespace(State *state, int length) {
+ // Returns true if "anon_prefix" is a proper prefix of "mangled_cur".
+ static const char anon_prefix[] = "_GLOBAL__N_";
+ return (length > static_cast<int>(sizeof(anon_prefix) - 1) &&
+ StrPrefix(RemainingInput(state), anon_prefix));
+}
+
+// Forward declarations of our parsing functions.
+static bool ParseMangledName(State *state);
+static bool ParseEncoding(State *state);
+static bool ParseName(State *state);
+static bool ParseUnscopedName(State *state);
+static bool ParseNestedName(State *state);
+static bool ParsePrefix(State *state);
+static bool ParseUnqualifiedName(State *state);
+static bool ParseSourceName(State *state);
+static bool ParseLocalSourceName(State *state);
+static bool ParseUnnamedTypeName(State *state);
+static bool ParseNumber(State *state, int *number_out);
+static bool ParseFloatNumber(State *state);
+static bool ParseSeqId(State *state);
+static bool ParseIdentifier(State *state, int length);
+static bool ParseOperatorName(State *state, int *arity);
+static bool ParseSpecialName(State *state);
+static bool ParseCallOffset(State *state);
+static bool ParseNVOffset(State *state);
+static bool ParseVOffset(State *state);
+static bool ParseCtorDtorName(State *state);
+static bool ParseDecltype(State *state);
+static bool ParseType(State *state);
+static bool ParseCVQualifiers(State *state);
+static bool ParseBuiltinType(State *state);
+static bool ParseFunctionType(State *state);
+static bool ParseBareFunctionType(State *state);
+static bool ParseClassEnumType(State *state);
+static bool ParseArrayType(State *state);
+static bool ParsePointerToMemberType(State *state);
+static bool ParseTemplateParam(State *state);
+static bool ParseTemplateTemplateParam(State *state);
+static bool ParseTemplateArgs(State *state);
+static bool ParseTemplateArg(State *state);
+static bool ParseBaseUnresolvedName(State *state);
+static bool ParseUnresolvedName(State *state);
+static bool ParseExpression(State *state);
+static bool ParseExprPrimary(State *state);
+static bool ParseExprCastValue(State *state);
+static bool ParseLocalName(State *state);
+static bool ParseLocalNameSuffix(State *state);
+static bool ParseDiscriminator(State *state);
+static bool ParseSubstitution(State *state, bool accept_std);
+
+// Implementation note: the following code is a straightforward
+// translation of the Itanium C++ ABI defined in BNF, with a few
+// exceptions:
+//
+// - Support GNU extensions not defined in the Itanium C++ ABI
+// - <prefix> and <template-prefix> are combined to avoid infinite loop
+// - Reorder patterns to shorten the code
+// - Reorder patterns to give greedier functions precedence
+//   Such cases are marked "Less greedy than" in the code.
+//
+// Each parsing function changes the parse state and returns true on
+// success, or returns false and doesn't change the parse state (note:
+// the parse-steps counter increases regardless of success or failure).
+// To ensure that the parse state isn't changed in the latter case, we
+// save the original state before we call multiple parsing functions
+// consecutively with &&, and restore it if unsuccessful. See
+// ParseEncoding() as an example of this convention. We follow the
+// convention throughout the code.
+//
+// Originally we tried to do demangling without following the full ABI
+// syntax but it turned out we needed to follow the full syntax to
+// parse complicated cases like nested template arguments. Note that
+// implementing a full-fledged demangler isn't trivial (libiberty's
+// cp-demangle.c has more than 4300 lines).
+//
+// Note that (foo) in <(foo) ...> is a modifier to be ignored.
+//
+// Reference:
+// - Itanium C++ ABI
+// <https://mentorembedded.github.io/cxx-abi/abi.html#mangling>
+
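+// A worked example (the mangled name and its demangled form appear in the
+// table in demangle.h): "_ZN3Foo3BarEv" is parsed as _Z N 3Foo 3Bar E v and
+// printed as "Foo::Bar()". The parameter type "v" (void) is parsed with
+// append disabled, so only "()" is emitted for the parameter list.
+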
+// <mangled-name> ::= _Z <encoding>
+static bool ParseMangledName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
+}
+
+// <encoding> ::= <(function) name> <bare-function-type>
+// ::= <(data) name>
+// ::= <special-name>
+static bool ParseEncoding(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ // Implementing the first two productions together as <name>
+ // [<bare-function-type>] avoids exponential blowup of backtracking.
+ //
+ // Since Optional(...) can't fail, there's no need to copy the state for
+ // backtracking.
+ if (ParseName(state) && Optional(ParseBareFunctionType(state))) {
+ return true;
+ }
+
+ if (ParseSpecialName(state)) {
+ return true;
+ }
+ return false;
+}
+
+// <name> ::= <nested-name>
+// ::= <unscoped-template-name> <template-args>
+// ::= <unscoped-name>
+// ::= <local-name>
+static bool ParseName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseNestedName(state) || ParseLocalName(state)) {
+ return true;
+ }
+
+ // We reorganize the productions to avoid re-parsing unscoped names.
+ // - Inline <unscoped-template-name> productions:
+ // <name> ::= <substitution> <template-args>
+ // ::= <unscoped-name> <template-args>
+ // ::= <unscoped-name>
+ // - Merge the two productions that start with unscoped-name:
+ // <name> ::= <unscoped-name> [<template-args>]
+
+ ParseState copy = state->parse_state;
+ // "std<...>" isn't a valid name.
+ if (ParseSubstitution(state, /*accept_std=*/false) &&
+ ParseTemplateArgs(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Note there's no need to restore state after this since only the first
+ // subparser can fail.
+ return ParseUnscopedName(state) && Optional(ParseTemplateArgs(state));
+}
+
+// <unscoped-name> ::= <unqualified-name>
+// ::= St <unqualified-name>
+static bool ParseUnscopedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseUnqualifiedName(state)) {
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
+ ParseUnqualifiedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <ref-qualifier> ::= R // lvalue method reference qualifier
+// ::= O // rvalue method reference qualifier
+static inline bool ParseRefQualifier(State *state) {
+ return ParseCharClass(state, "OR");
+}
+
+// <nested-name> ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix>
+// <unqualified-name> E
+// ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
+// <template-args> E
+static bool ParseNestedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
+ Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseRefQualifier(state)) && ParsePrefix(state) &&
+ LeaveNestedName(state, copy.nest_level) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// This part is tricky. If we translate these productions to code literally,
+// we end up in an infinite loop, so we merge them to avoid that.
+//
+// <prefix> ::= <prefix> <unqualified-name>
+// ::= <template-prefix> <template-args>
+// ::= <template-param>
+// ::= <substitution>
+// ::= # empty
+// <template-prefix> ::= <prefix> <(template) unqualified-name>
+// ::= <template-param>
+// ::= <substitution>
+static bool ParsePrefix(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ bool has_something = false;
+ while (true) {
+ MaybeAppendSeparator(state);
+ if (ParseTemplateParam(state) ||
+ ParseSubstitution(state, /*accept_std=*/true) ||
+ ParseUnscopedName(state) ||
+ (ParseOneCharToken(state, 'M') && ParseUnnamedTypeName(state))) {
+ has_something = true;
+ MaybeIncreaseNestLevel(state);
+ continue;
+ }
+ MaybeCancelLastSeparator(state);
+ if (has_something && ParseTemplateArgs(state)) {
+ return ParsePrefix(state);
+ } else {
+ break;
+ }
+ }
+ return true;
+}
+
+// <unqualified-name> ::= <operator-name>
+// ::= <ctor-dtor-name>
+// ::= <source-name>
+// ::= <local-source-name> // GCC extension; see below.
+// ::= <unnamed-type-name>
+static bool ParseUnqualifiedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
+ ParseSourceName(state) || ParseLocalSourceName(state) ||
+ ParseUnnamedTypeName(state));
+}
+
+// <source-name> ::= <positive length number> <identifier>
+static bool ParseSourceName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ int length = -1;
+ if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <local-source-name> ::= L <source-name> [<discriminator>]
+//
+// References:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
+// https://gcc.gnu.org/viewcvs?view=rev&revision=124467
+static bool ParseLocalSourceName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
+ Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <unnamed-type-name> ::= Ut [<(nonnegative) number>] _
+// ::= <closure-type-name>
+// <closure-type-name> ::= Ul <lambda-sig> E [<(nonnegative) number>] _
+// <lambda-sig> ::= <(parameter) type>+
+static bool ParseUnnamedTypeName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ // Type's 1-based index n is encoded as { "", n == 1; itoa(n-2), otherwise }.
+ // Optionally parse the encoded value into 'which' and add 2 to get the index.
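+ // For example, "Ut_" encodes index 1 and prints as "{unnamed type#1}",
+ // while "Ut0_" encodes index 2 and prints as "{unnamed type#2}".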
+ int which = -1;
+
+ // Unnamed type local to function or class.
+ if (ParseTwoCharToken(state, "Ut") && Optional(ParseNumber(state, &which)) &&
+ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "{unnamed type#");
+ MaybeAppendDecimal(state, 2 + which);
+ MaybeAppend(state, "}");
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Closure type.
+ which = -1;
+ if (ParseTwoCharToken(state, "Ul") && DisableAppend(state) &&
+ OneOrMore(ParseType, state) && RestoreAppend(state, copy.append) &&
+ ParseOneCharToken(state, 'E') && Optional(ParseNumber(state, &which)) &&
+ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "{lambda()#");
+ MaybeAppendDecimal(state, 2 + which);
+ MaybeAppend(state, "}");
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <number> ::= [n] <non-negative decimal integer>
+// If "number_out" is non-null, then *number_out is set to the value of the
+// parsed number on success.
+static bool ParseNumber(State *state, int *number_out) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ bool negative = false;
+ if (ParseOneCharToken(state, 'n')) {
+ negative = true;
+ }
+ const char *p = RemainingInput(state);
+ uint64_t number = 0;
+ for (; *p != '\0'; ++p) {
+ if (IsDigit(*p)) {
+ number = number * 10 + (*p - '0');
+ } else {
+ break;
+ }
+ }
+ // Apply the sign with uint64_t arithmetic so overflows aren't UB. Gives
+ // "incorrect" results for out-of-range inputs, but negative values only
+ // appear for literals, which aren't printed.
+ if (negative) {
+ number = ~number + 1;
+ }
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
+ if (number_out != nullptr) {
+ // Note: possibly truncate "number".
+ *number_out = number;
+ }
+ return true;
+ }
+ return false;
+}
+
+// Floating-point literals are encoded using a fixed-length lowercase
+// hexadecimal string.
+static bool ParseFloatNumber(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ const char *p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
+ break;
+ }
+ }
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
+ return true;
+ }
+ return false;
+}
+
+// The <seq-id> is a sequence number in base 36,
+// using digits and upper case letters
+static bool ParseSeqId(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ const char *p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
+ break;
+ }
+ }
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
+ return true;
+ }
+ return false;
+}
+
+// <identifier> ::= <unqualified source code identifier> (of given length)
+static bool ParseIdentifier(State *state, int length) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (length < 0 || !AtLeastNumCharsRemaining(RemainingInput(state), length)) {
+ return false;
+ }
+ if (IdentifierIsAnonymousNamespace(state, length)) {
+ MaybeAppend(state, "(anonymous namespace)");
+ } else {
+ MaybeAppendWithLength(state, RemainingInput(state), length);
+ }
+ state->parse_state.mangled_idx += length;
+ return true;
+}
+
+// <operator-name> ::= nw, and other two letters cases
+// ::= cv <type> # (cast)
+// ::= v <digit> <source-name> # vendor extended operator
+static bool ParseOperatorName(State *state, int *arity) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (!AtLeastNumCharsRemaining(RemainingInput(state), 2)) {
+ return false;
+ }
+ // First check with "cv" (cast) case.
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
+ EnterNestedName(state) && ParseType(state) &&
+ LeaveNestedName(state, copy.nest_level)) {
+ if (arity != nullptr) {
+ *arity = 1;
+ }
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Then vendor extended operators.
+ if (ParseOneCharToken(state, 'v') && ParseDigit(state, arity) &&
+ ParseSourceName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Other operator names should start with a lowercase letter followed
+ // by a lowercase or uppercase letter.
+ if (!(IsLower(RemainingInput(state)[0]) &&
+ IsAlpha(RemainingInput(state)[1]))) {
+ return false;
+ }
+ // We may want to perform a binary search if we really need speed.
+ const AbbrevPair *p;
+ for (p = kOperatorList; p->abbrev != nullptr; ++p) {
+ if (RemainingInput(state)[0] == p->abbrev[0] &&
+ RemainingInput(state)[1] == p->abbrev[1]) {
+ if (arity != nullptr) {
+ *arity = p->arity;
+ }
+ MaybeAppend(state, "operator");
+ if (IsLower(*p->real_name)) { // new, delete, etc.
+ MaybeAppend(state, " ");
+ }
+ MaybeAppend(state, p->real_name);
+ state->parse_state.mangled_idx += 2;
+ return true;
+ }
+ }
+ return false;
+}
+
+// <special-name> ::= TV <type>
+// ::= TT <type>
+// ::= TI <type>
+// ::= TS <type>
+// ::= Tc <call-offset> <call-offset> <(base) encoding>
+// ::= GV <(object) name>
+// ::= T <call-offset> <(base) encoding>
+// G++ extensions:
+// ::= TC <type> <(offset) number> _ <(base) type>
+// ::= TF <type>
+// ::= TJ <type>
+// ::= GR <name>
+// ::= GA <encoding>
+// ::= Th <call-offset> <(base) encoding>
+// ::= Tv <call-offset> <(base) encoding>
+//
+// Note: we don't care much about them since they don't appear in
+// stack traces. They are special data.
+static bool ParseSpecialName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") &&
+ ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
+ ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // G++ extensions
+ if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
+ ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
+ DisableAppend(state) && ParseType(state)) {
+ RestoreAppend(state, copy.append);
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
+ ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <call-offset> ::= h <nv-offset> _
+// ::= v <v-offset> _
+static bool ParseCallOffset(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
+ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
+ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <nv-offset> ::= <(offset) number>
+static bool ParseNVOffset(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return ParseNumber(state, nullptr);
+}
+
+// <v-offset> ::= <(offset) number> _ <(virtual offset) number>
+static bool ParseVOffset(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
+ ParseNumber(state, nullptr)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <ctor-dtor-name> ::= C1 | C2 | C3
+// ::= D0 | D1 | D2
+// # GCC extensions: "unified" constructor/destructor. See
+// # https://github.com/gcc-mirror/gcc/blob/7ad17b583c3643bd4557f29b8391ca7ef08391f5/gcc/cp/mangle.c#L1847
+// ::= C4 | D4
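+//
+// The output reuses the most recently appended identifier: for example,
+// "_ZN3FooC1Ev" demangles to "Foo::Foo()" (see the table in demangle.h), and
+// the D productions prefix "~" to produce the destructor name.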
+static bool ParseCtorDtorName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'C') && ParseCharClass(state, "1234")) {
+ const char *const prev_name = state->out + state->parse_state.prev_name_idx;
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "0124")) {
+ const char *const prev_name = state->out + state->parse_state.prev_name_idx;
+ MaybeAppend(state, "~");
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <decltype> ::= Dt <expression> E # decltype of an id-expression or class
+// # member access (C++0x)
+// ::= DT <expression> E # decltype of an expression (C++0x)
+static bool ParseDecltype(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
+ ParseExpression(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <type> ::= <CV-qualifiers> <type>
+// ::= P <type> # pointer-to
+// ::= R <type> # reference-to
+// ::= O <type> # rvalue reference-to (C++0x)
+// ::= C <type> # complex pair (C 2000)
+// ::= G <type> # imaginary (C 2000)
+// ::= U <source-name> <type> # vendor extended type qualifier
+// ::= <builtin-type>
+// ::= <function-type>
+// ::= <class-enum-type> # note: just an alias for <name>
+// ::= <array-type>
+// ::= <pointer-to-member-type>
+// ::= <template-template-param> <template-args>
+// ::= <template-param>
+// ::= <decltype>
+// ::= <substitution>
+// ::= Dp <type> # pack expansion of (C++0x)
+//
+static bool ParseType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ // We should check CV-qualifiers and the OPRCG prefixes first.
+ //
+ // CV-qualifiers overlap with some operator names, but an operator name is not
+ // valid as a type. To avoid an ambiguity that can lead to exponential time
+ // complexity, refuse to backtrack the CV-qualifiers.
+ //
+ // _Z4aoeuIrMvvE
+ // => _Z 4aoeuI rM v v E
+ // aoeu<operator%=, void, void>
+ // => _Z 4aoeuI r Mv v E
+ // aoeu<void void::* restrict>
+ //
+ // By consuming the CV-qualifiers first, the former parse is disabled.
+ if (ParseCVQualifiers(state)) {
+ const bool result = ParseType(state);
+ if (!result) state->parse_state = copy;
+ return result;
+ }
+ state->parse_state = copy;
+
+ // Similarly, these tag characters can overlap with other <name>s resulting in
+ // two different parse prefixes that land on <template-args> in the same
+ // place, such as "C3r1xI...". So, disable the "ctor-name = C3" parse by
+ // refusing to backtrack the tag characters.
+ if (ParseCharClass(state, "OPRCG")) {
+ const bool result = ParseType(state);
+ if (!result) state->parse_state = copy;
+ return result;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // nullptr_t, i.e. decltype(nullptr).
+ if (ParseTwoCharToken(state, "Dn")) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
+ ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseBuiltinType(state) || ParseFunctionType(state) ||
+ ParseClassEnumType(state) || ParseArrayType(state) ||
+ ParsePointerToMemberType(state) || ParseDecltype(state) ||
+ // "std" on its own isn't a type.
+ ParseSubstitution(state, /*accept_std=*/false)) {
+ return true;
+ }
+
+ if (ParseTemplateTemplateParam(state) && ParseTemplateArgs(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Less greedy than <template-template-param> <template-args>.
+ if (ParseTemplateParam(state)) {
+ return true;
+ }
+
+ return false;
+}
+
+// <CV-qualifiers> ::= [r] [V] [K]
+// We don't allow empty <CV-qualifiers> to avoid infinite loop in
+// ParseType().
+static bool ParseCVQualifiers(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ int num_cv_qualifiers = 0;
+ num_cv_qualifiers += ParseOneCharToken(state, 'r');
+ num_cv_qualifiers += ParseOneCharToken(state, 'V');
+ num_cv_qualifiers += ParseOneCharToken(state, 'K');
+ return num_cv_qualifiers > 0;
+}
+
+// <builtin-type> ::= v, etc.
+// ::= u <source-name>
+static bool ParseBuiltinType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ const AbbrevPair *p;
+ for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
+ if (RemainingInput(state)[0] == p->abbrev[0]) {
+ MaybeAppend(state, p->real_name);
+ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <function-type> ::= F [Y] <bare-function-type> E
+static bool ParseFunctionType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'F') &&
+ Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <bare-function-type> ::= <(signature) type>+
+static bool ParseBareFunctionType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ DisableAppend(state);
+ if (OneOrMore(ParseType, state)) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "()");
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <class-enum-type> ::= <name>
+static bool ParseClassEnumType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return ParseName(state);
+}
+
+// <array-type> ::= A <(positive dimension) number> _ <(element) type>
+// ::= A [<(dimension) expression>] _ <(element) type>
+static bool ParseArrayType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <pointer-to-member-type> ::= M <(class) type> <(member) type>
+static bool ParsePointerToMemberType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <template-param> ::= T_
+// ::= T <parameter-2 non-negative number> _
+static bool ParseTemplateParam(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseTwoCharToken(state, "T_")) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <template-template-param> ::= <template-param>
+// ::= <substitution>
+static bool ParseTemplateTemplateParam(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ return (ParseTemplateParam(state) ||
+ // "std" on its own isn't a template.
+ ParseSubstitution(state, /*accept_std=*/false));
+}
+
+// <template-args> ::= I <template-arg>+ E
+static bool ParseTemplateArgs(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ DisableAppend(state);
+ if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "<>");
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <template-arg> ::= <type>
+// ::= <expr-primary>
+// ::= J <template-arg>* E # argument pack
+// ::= X <expression> E
+static bool ParseTemplateArg(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'J') && ZeroOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // There can be significant overlap between the following leading to
+ // exponential backtracking:
+ //
+ // <expr-primary> ::= L <type> <expr-cast-value> E
+ // e.g. L 2xxIvE 1 E
+ // <type> ==> <local-source-name> <template-args>
+ // e.g. L 2xx IvE
+ //
+ // This means parsing an entire <type> twice, and <type> can contain
+ // <template-arg>, so this can generate exponential backtracking. There is
+ // only overlap when the remaining input starts with "L <source-name>", so
+ // parse all cases that can start this way jointly to share the common prefix.
+ //
+ // We have:
+ //
+ // <template-arg> ::= <type>
+ // ::= <expr-primary>
+ //
+ // First, drop all the productions of <type> that must start with something
+ // other than 'L'. All that's left is <class-enum-type>; inline it.
+ //
+ // <type> ::= <nested-name> # starts with 'N'
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name> # starts with 'Z'
+ //
+ // Drop and inline again:
+ //
+ // <type> ::= <unscoped-name>
+ // ::= <unscoped-name> <template-args>
+ // ::= <substitution> <template-args> # starts with 'S'
+ //
+ // Merge the first two, inline <unscoped-name>, drop last:
+ //
+ // <type> ::= <unqualified-name> [<template-args>]
+ // ::= St <unqualified-name> [<template-args>] # starts with 'S'
+ //
+ // Drop and inline:
+ //
+ // <type> ::= <operator-name> [<template-args>] # starts with lowercase
+ // ::= <ctor-dtor-name> [<template-args>] # starts with 'C' or 'D'
+ // ::= <source-name> [<template-args>] # starts with digit
+ // ::= <local-source-name> [<template-args>]
+ // ::= <unnamed-type-name> [<template-args>] # starts with 'U'
+ //
+ // One more time:
+ //
+ // <type> ::= L <source-name> [<template-args>]
+ //
+ // Likewise with <expr-primary>:
+ //
+ // <expr-primary> ::= L <type> <expr-cast-value> E
+ // ::= LZ <encoding> E # cannot overlap; drop
+ // ::= L <mangled_name> E # cannot overlap; drop
+ //
+ // By similar reasoning as shown above, the only <type>s starting with
+ // <source-name> are "<source-name> [<template-args>]". Inline this.
+ //
+ // <expr-primary> ::= L <source-name> [<template-args>] <expr-cast-value> E
+ //
+ // Now inline both of these into <template-arg>:
+ //
+ // <template-arg> ::= L <source-name> [<template-args>]
+ // ::= L <source-name> [<template-args>] <expr-cast-value> E
+ //
+ // Merge them and we're done:
+ // <template-arg>
+ // ::= L <source-name> [<template-args>] [<expr-cast-value> E]
+ if (ParseLocalSourceName(state) && Optional(ParseTemplateArgs(state))) {
+ copy = state->parse_state;
+ if (ParseExprCastValue(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return true;
+ }
+
+ // Now that the overlapping cases can't reach this code, we can safely call
+ // both of these.
+ if (ParseType(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <unresolved-type> ::= <template-param> [<template-args>]
+// ::= <decltype>
+// ::= <substitution>
+static inline bool ParseUnresolvedType(State *state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+ return (ParseTemplateParam(state) && Optional(ParseTemplateArgs(state))) ||
+ ParseDecltype(state) || ParseSubstitution(state, /*accept_std=*/false);
+}
+
+// <simple-id> ::= <source-name> [<template-args>]
+static inline bool ParseSimpleId(State *state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+
+ // Note: <simple-id> cannot be followed by a parameter pack; see comment in
+ // ParseUnresolvedType.
+ return ParseSourceName(state) && Optional(ParseTemplateArgs(state));
+}
+
+// <base-unresolved-name> ::= <source-name> [<template-args>]
+// ::= on <operator-name> [<template-args>]
+// ::= dn <destructor-name>
+static bool ParseBaseUnresolvedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ if (ParseSimpleId(state)) {
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "on") && ParseOperatorName(state, nullptr) &&
+ Optional(ParseTemplateArgs(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "dn") &&
+ (ParseUnresolvedType(state) || ParseSimpleId(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <unresolved-name> ::= [gs] <base-unresolved-name>
+// ::= sr <unresolved-type> <base-unresolved-name>
+// ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
+// <base-unresolved-name>
+// ::= [gs] sr <unresolved-qualifier-level>+ E
+// <base-unresolved-name>
+static bool ParseUnresolvedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ ParseState copy = state->parse_state;
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "sr") && ParseUnresolvedType(state) &&
+ ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "sr") && ParseOneCharToken(state, 'N') &&
+ ParseUnresolvedType(state) &&
+ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ ParseTwoCharToken(state, "sr") &&
+ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <expression> ::= <1-ary operator-name> <expression>
+// ::= <2-ary operator-name> <expression> <expression>
+// ::= <3-ary operator-name> <expression> <expression> <expression>
+// ::= cl <expression>+ E
+// ::= cv <type> <expression> # type (expression)
+// ::= cv <type> _ <expression>* E # type (expr-list)
+// ::= st <type>
+// ::= <template-param>
+// ::= <function-param>
+// ::= <expr-primary>
+// ::= dt <expression> <unresolved-name> # expr.name
+// ::= pt <expression> <unresolved-name> # expr->name
+// ::= sp <expression> # argument pack expansion
+// ::= sr <type> <unqualified-name> <template-args>
+// ::= sr <type> <unqualified-name>
+// <function-param> ::= fp <(top-level) CV-qualifiers> _
+// ::= fp <(top-level) CV-qualifiers> <number> _
+// ::= fL <number> p <(top-level) CV-qualifiers> _
+// ::= fL <number> p <(top-level) CV-qualifiers> <number> _
+static bool ParseExpression(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+
+ // Object/function call expression.
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "cl") && OneOrMore(ParseExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Function-param expression (level 0).
+ if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Function-param expression (level 1+).
+ if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
+ ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Parse the conversion expressions jointly to avoid re-parsing the <type> in
+ // their common prefix. Parsed as:
+ // <expression> ::= cv <type> <conversion-args>
+ // <conversion-args> ::= _ <expression>* E
+ // ::= <expression>
+ //
+ // Also don't try ParseOperatorName after seeing "cv", since ParseOperatorName
+ // also needs to accept "cv <type>" in other contexts.
+ if (ParseTwoCharToken(state, "cv")) {
+ if (ParseType(state)) {
+ ParseState copy2 = state->parse_state;
+ if (ParseOneCharToken(state, '_') && ZeroOrMore(ParseExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy2;
+ if (ParseExpression(state)) {
+ return true;
+ }
+ }
+ } else {
+ // Parse unary, binary, and ternary operator expressions jointly, taking
+ // care not to re-parse subexpressions repeatedly. Parse like:
+ // <expression> ::= <operator-name> <expression>
+ // [<one-to-two-expressions>]
+ // <one-to-two-expressions> ::= <expression> [<expression>]
+ int arity = -1;
+ if (ParseOperatorName(state, &arity) &&
+ arity > 0 && // 0 arity => disabled.
+ (arity < 3 || ParseExpression(state)) &&
+ (arity < 2 || ParseExpression(state)) &&
+ (arity < 1 || ParseExpression(state))) {
+ return true;
+ }
+ }
+ state->parse_state = copy;
+
+ // sizeof type
+ if (ParseTwoCharToken(state, "st") && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Object and pointer member access expressions.
+ if ((ParseTwoCharToken(state, "dt") || ParseTwoCharToken(state, "pt")) &&
+ ParseExpression(state) && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Pointer-to-member access expressions. This parses the same as a binary
+ // operator, but it's implemented separately because "ds" shouldn't be
+ // accepted in other contexts that parse an operator name.
+ if (ParseTwoCharToken(state, "ds") && ParseExpression(state) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Parameter pack expansion
+ if (ParseTwoCharToken(state, "sp") && ParseExpression(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return ParseUnresolvedName(state);
+}
+
+// <expr-primary> ::= L <type> <(value) number> E
+// ::= L <type> <(value) float> E
+// ::= L <mangled-name> E
+// // A bug in g++'s C++ ABI version 2 (-fabi-version=2).
+// ::= LZ <encoding> E
+//
+// Warning, subtle: the "bug" LZ production above is ambiguous with the first
+// production where <type> starts with <local-name>, which can lead to
+// exponential backtracking in two scenarios:
+//
+// - When whatever follows the E in the <local-name> in the first production is
+// not a name, we backtrack the whole <encoding> and re-parse the whole thing.
+//
+// - When whatever follows the <local-name> in the first production is not a
+// number and this <expr-primary> may be followed by a name, we backtrack the
+// <name> and re-parse it.
+//
+// Moreover this ambiguity isn't always resolved -- for example, the following
+// has two different parses:
+//
+// _ZaaILZ4aoeuE1x1EvE
+// => operator&&<aoeu, x, E, void>
+// => operator&&<(aoeu::x)(1), void>
+//
+// To resolve this, we just do what GCC's demangler does, and refuse to parse
+// casts to <local-name> types.
+static bool ParseExprPrimary(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+
+ // The "LZ" special case: if we see LZ, we commit to accept "LZ <encoding> E"
+ // or fail, no backtracking.
+ if (ParseTwoCharToken(state, "LZ")) {
+ if (ParseEncoding(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+
+ state->parse_state = copy;
+ return false;
+ }
+
+ // The merged cast production.
+ if (ParseOneCharToken(state, 'L') && ParseType(state) &&
+ ParseExprCastValue(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <number> or <float>, followed by 'E', as described above ParseExprPrimary.
+static bool ParseExprCastValue(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ // We have to be able to backtrack after accepting a number because we could
+ // have e.g. "7fffE", which will accept "7" as a number but then fail to find
+ // the 'E'.
+ ParseState copy = state->parse_state;
+ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <local-name> ::= Z <(function) encoding> E <(entity) name> [<discriminator>]
+// ::= Z <(function) encoding> E s [<discriminator>]
+//
+// Parsing a common prefix of these two productions together avoids an
+// exponential blowup of backtracking. Parse like:
+// <local-name> := Z <encoding> E <local-name-suffix>
+// <local-name-suffix> ::= s [<discriminator>]
+// ::= <name> [<discriminator>]
+
+static bool ParseLocalNameSuffix(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+
+ if (MaybeAppend(state, "::") && ParseName(state) &&
+ Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+
+ // Since we're not going to overwrite the above "::" by re-parsing the
+ // <encoding> (whose trailing '\0' byte was in the byte now holding the
+ // first ':'), we have to rollback the "::" if the <name> parse failed.
+ if (state->parse_state.append) {
+ state->out[state->parse_state.out_cur_idx - 2] = '\0';
+ }
+
+ return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
+}
+
+static bool ParseLocalName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+ ParseOneCharToken(state, 'E') && ParseLocalNameSuffix(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <discriminator> := _ <(non-negative) number>
+static bool ParseDiscriminator(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
+ return true;
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// <substitution> ::= S_
+// ::= S <seq-id> _
+// ::= St, etc.
+//
+// "St" is special in that it's not valid as a standalone name, and it *is*
+// allowed to precede a name without being wrapped in "N...E". This means that
+// if we accept it on its own, we can accept "St1a" and try to parse
+// template-args, then fail and backtrack, accept "St" on its own, then "1a" as
+// an unqualified name and re-parse the same template-args. To block this
+// exponential backtracking, we disable it with 'accept_std=false' in
+// problematic contexts.
+static bool ParseSubstitution(State *state, bool accept_std) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseTwoCharToken(state, "S_")) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Expand abbreviations like "St" => "std".
+ if (ParseOneCharToken(state, 'S')) {
+ const AbbrevPair *p;
+ for (p = kSubstitutionList; p->abbrev != nullptr; ++p) {
+ if (RemainingInput(state)[0] == p->abbrev[1] &&
+ (accept_std || p->abbrev[1] != 't')) {
+ MaybeAppend(state, "std");
+ if (p->real_name[0] != '\0') {
+ MaybeAppend(state, "::");
+ MaybeAppend(state, p->real_name);
+ }
+ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ }
+ }
+ state->parse_state = copy;
+ return false;
+}
+
+// Parse <mangled-name>, optionally followed by either a function-clone suffix
+// or version suffix. Returns true only if all of the mangled input was
+// consumed.
+static bool ParseTopLevelMangledName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) return false;
+ if (ParseMangledName(state)) {
+ if (RemainingInput(state)[0] != '\0') {
+ // Drop trailing function clone suffix, if any.
+ if (IsFunctionCloneSuffix(RemainingInput(state))) {
+ return true;
+ }
+ // Append trailing version suffix if any.
+ // ex. _Z3foo@@GLIBCXX_3.4
+ if (RemainingInput(state)[0] == '@') {
+ MaybeAppend(state, RemainingInput(state));
+ return true;
+ }
+ return false; // Unconsumed suffix.
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool Overflowed(const State *state) {
+ return state->parse_state.out_cur_idx >= state->out_end_idx;
+}
+
+// The demangler entry point.
+bool Demangle(const char *mangled, char *out, int out_size) {
+ State state;
+ InitState(&state, mangled, out, out_size);
+ return ParseTopLevelMangledName(&state) && !Overflowed(&state);
+}
+
+} // namespace debugging_internal
+} // namespace absl
diff --git a/absl/debugging/internal/demangle.h b/absl/debugging/internal/demangle.h
new file mode 100644
index 0000000..81bb0df
--- /dev/null
+++ b/absl/debugging/internal/demangle.h
@@ -0,0 +1,67 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// An async-signal-safe and thread-safe demangler for Itanium C++ ABI
+// (aka G++ V3 ABI).
+//
+// The demangler is implemented to be used in async signal handlers to
+// symbolize stack traces. We cannot use libstdc++'s
+// abi::__cxa_demangle() in such signal handlers since it's not async
+// signal safe (it uses malloc() internally).
+//
+// Note that this demangler doesn't support full demangling. More
+// specifically, it doesn't print types of function parameters and
+// types of template arguments. It just skips them. However, it's
+// still very useful to extract basic information such as class,
+// function, constructor, destructor, and operator names.
+//
+// See the implementation note in demangle.cc if you are interested.
+//
+// Example:
+//
+// | Mangled Name | The Demangler | abi::__cxa_demangle()
+// |---------------|---------------|-----------------------
+// | _Z1fv | f() | f()
+// | _Z1fi | f() | f(int)
+// | _Z3foo3bar | foo() | foo(bar)
+// | _Z1fIiEvi | f<>() | void f<int>(int)
+// | _ZN1N1fE | N::f | N::f
+// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar()
+// | _Zrm1XS_ | operator%() | operator%(X, X)
+// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo()
+// | _Z1fSs | f() | f(std::basic_string<char,
+// | | | std::char_traits<char>,
+// | | | std::allocator<char> >)
+//
+// See the unit test for more examples.
+//
+// Note: we might want to write demanglers for ABIs other than Itanium
+// C++ ABI in the future.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
+#define ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
+
+namespace absl {
+namespace debugging_internal {
+
+// Demangle `mangled`. On success, return true and write the
+// demangled symbol name to `out`. Otherwise, return false.
+// `out` is modified even if demangling is unsuccessful.
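+//
+// A minimal usage sketch (the example mangled name and its demangled form
+// are taken from the table above):
+//
+//   char buf[1024];
+//   if (Demangle("_ZN3Foo3BarEv", buf, sizeof(buf))) {
+//     // buf now holds "Foo::Bar()".
+//   }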
+bool Demangle(const char *mangled, char *out, int out_size);
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
diff --git a/absl/debugging/internal/demangle_test.cc b/absl/debugging/internal/demangle_test.cc
new file mode 100644
index 0000000..a68ce32
--- /dev/null
+++ b/absl/debugging/internal/demangle_test.cc
@@ -0,0 +1,193 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/demangle.h"
+
+#include <cstdlib>
+#include <string>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/internal/stack_consumption.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+namespace debugging_internal {
+namespace {
+
+// A wrapper function for Demangle() to make the unit test simple.
+static const char *DemangleIt(const char * const mangled) {
+ static char demangled[4096];
+ if (Demangle(mangled, demangled, sizeof(demangled))) {
+ return demangled;
+ } else {
+ return mangled;
+ }
+}
+
+// Test corner cases of boundary conditions.
+TEST(Demangle, CornerCases) {
+ char tmp[10];
+ EXPECT_TRUE(Demangle("_Z6foobarv", tmp, sizeof(tmp)));
+ // sizeof("foobar()") == 9
+ EXPECT_STREQ("foobar()", tmp);
+ EXPECT_TRUE(Demangle("_Z6foobarv", tmp, 9));
+ EXPECT_STREQ("foobar()", tmp);
+ EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 8)); // Not enough.
+ EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 1));
+ EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 0));
+ EXPECT_FALSE(Demangle("_Z6foobarv", nullptr, 0)); // Should not cause SEGV.
+ EXPECT_FALSE(Demangle("_Z1000000", tmp, 9));
+}
+
+// Test handling of functions suffixed with .clone.N, which is used
+// by GCC 4.5.x (and our locally-modified version of GCC 4.4.x), and
+// .constprop.N and .isra.N, which are used by GCC 4.6.x. These
+// suffixes are used to indicate functions which have been cloned
+// during optimization. We ignore these suffixes.
+TEST(Demangle, Clones) {
+ char tmp[20];
+ EXPECT_TRUE(Demangle("_ZL3Foov", tmp, sizeof(tmp)));
+ EXPECT_STREQ("Foo()", tmp);
+ EXPECT_TRUE(Demangle("_ZL3Foov.clone.3", tmp, sizeof(tmp)));
+ EXPECT_STREQ("Foo()", tmp);
+ EXPECT_TRUE(Demangle("_ZL3Foov.constprop.80", tmp, sizeof(tmp)));
+ EXPECT_STREQ("Foo()", tmp);
+ EXPECT_TRUE(Demangle("_ZL3Foov.isra.18", tmp, sizeof(tmp)));
+ EXPECT_STREQ("Foo()", tmp);
+ EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp)));
+ EXPECT_STREQ("Foo()", tmp);
+ // Invalid (truncated), should not demangle.
+ EXPECT_FALSE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
+ // Invalid (.clone. not followed by number), should not demangle.
+ EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp)));
+ // Invalid (.clone. followed by non-number), should not demangle.
+ EXPECT_FALSE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
+ // Invalid (.constprop. not followed by number), should not demangle.
+ EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
+}
+
+// Tests that verify that Demangle footprint is within some limit.
+// They are not to be run under sanitizers as the sanitizers increase
+// stack consumption by about 4x.
+#if defined(ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION) && \
+ !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
+ !defined(THREAD_SANITIZER)
+
+static const char *g_mangled;
+static char g_demangle_buffer[4096];
+static char *g_demangle_result;
+
+static void DemangleSignalHandler(int signo) {
+ if (Demangle(g_mangled, g_demangle_buffer, sizeof(g_demangle_buffer))) {
+ g_demangle_result = g_demangle_buffer;
+ } else {
+ g_demangle_result = nullptr;
+ }
+}
+
+// Call Demangle and figure out the stack footprint of this call.
+static const char *DemangleStackConsumption(const char *mangled,
+ int *stack_consumed) {
+ g_mangled = mangled;
+ *stack_consumed = GetSignalHandlerStackConsumption(DemangleSignalHandler);
+ ABSL_RAW_LOG(INFO, "Stack consumption of Demangle: %d", *stack_consumed);
+ return g_demangle_result;
+}
+
+// Demangle stack consumption should be within 8kB for simple mangled names
+// with some level of nesting. With alternate signal stack we have 64K,
+// but some signal handlers run on thread stack, and could have arbitrarily
+// little space left (so we don't want to make this number too large).
+const int kStackConsumptionUpperLimit = 8192;
+
+// Returns a mangled name nested to the given depth.
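+// For example, NestedMangledName(1) returns "_Z1aIXL_Z1aEEE", which the
+// demangler renders as "a<>" (template arguments are elided).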
+static std::string NestedMangledName(int depth) {
+ std::string mangled_name = "_Z1a";
+ if (depth > 0) {
+ mangled_name += "IXL";
+ mangled_name += NestedMangledName(depth - 1);
+ mangled_name += "EEE";
+ }
+ return mangled_name;
+}
+
+TEST(Demangle, DemangleStackConsumption) {
+ // Measure stack consumption of Demangle for nested mangled names of varying
+ // depth. Since Demangle is implemented as a recursive descent parser,
+ // stack consumption will grow as the nesting depth increases. By measuring
+ // the stack consumption for increasing depths, we can see the growing
+ // impact of any stack-saving changes made to the code for Demangle.
+ int stack_consumed = 0;
+
+ const char *demangled =
+ DemangleStackConsumption("_Z6foobarv", &stack_consumed);
+ EXPECT_STREQ("foobar()", demangled);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
+
+ const std::string nested_mangled_name0 = NestedMangledName(0);
+ demangled = DemangleStackConsumption(nested_mangled_name0.c_str(),
+ &stack_consumed);
+ EXPECT_STREQ("a", demangled);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
+
+ const std::string nested_mangled_name1 = NestedMangledName(1);
+ demangled = DemangleStackConsumption(nested_mangled_name1.c_str(),
+ &stack_consumed);
+ EXPECT_STREQ("a<>", demangled);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
+
+ const std::string nested_mangled_name2 = NestedMangledName(2);
+ demangled = DemangleStackConsumption(nested_mangled_name2.c_str(),
+ &stack_consumed);
+ EXPECT_STREQ("a<>", demangled);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
+
+ const std::string nested_mangled_name3 = NestedMangledName(3);
+ demangled = DemangleStackConsumption(nested_mangled_name3.c_str(),
+ &stack_consumed);
+ EXPECT_STREQ("a<>", demangled);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
+}
+
+#endif // Stack consumption tests
+
+static void TestOnInput(const char* input) {
+ static const int kOutSize = 1048576;
+ auto out = absl::make_unique<char[]>(kOutSize);
+ Demangle(input, out.get(), kOutSize);
+}
+
+TEST(DemangleRegression, NegativeLength) {
+ TestOnInput("_ZZn4");
+}
+
+TEST(DemangleRegression, DeeplyNestedArrayType) {
+ const int depth = 100000;
+ std::string data = "_ZStI";
+ data.reserve(data.size() + 3 * depth + 1);
+ for (int i = 0; i < depth; i++) {
+ data += "A1_";
+ }
+ TestOnInput(data.c_str());
+}
+
+} // namespace
+} // namespace debugging_internal
+} // namespace absl
diff --git a/absl/debugging/internal/elf_mem_image.cc b/absl/debugging/internal/elf_mem_image.cc
new file mode 100644
index 0000000..e7408bc
--- /dev/null
+++ b/absl/debugging/internal/elf_mem_image.cc
@@ -0,0 +1,380 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in an in-memory Elf image.
+//
+
+#include "absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
+
+#include <string.h>
+#include <cassert>
+#include <cstddef>
+#include "absl/base/internal/raw_logging.h"
+
+// From binutils/include/elf/common.h (this doesn't appear to be documented
+// anywhere else).
+//
+// /* This flag appears in a Versym structure. It means that the symbol
+// is hidden, and is only visible with an explicit version number.
+// This is a GNU extension. */
+// #define VERSYM_HIDDEN 0x8000
+//
+// /* This is the mask for the rest of the Versym information. */
+// #define VERSYM_VERSION 0x7fff
+
+#define VERSYM_VERSION 0x7fff
+
+namespace absl {
+namespace debugging_internal {
+
+namespace {
+
+#if __WORDSIZE == 32
+const int kElfClass = ELFCLASS32;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); }
+#elif __WORDSIZE == 64
+const int kElfClass = ELFCLASS64;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); }
+#else
+const int kElfClass = -1;
+int ElfBind(const ElfW(Sym) *) {
+ ABSL_RAW_LOG(FATAL, "Unexpected word size");
+ return 0;
+}
+int ElfType(const ElfW(Sym) *) {
+ ABSL_RAW_LOG(FATAL, "Unexpected word size");
+ return 0;
+}
+#endif
+
+// Extract an element from one of the ELF tables, cast it to desired type.
+// This is just a simple arithmetic and a glorified cast.
+// Callers are responsible for bounds checking.
+template <typename T>
+const T *GetTableElement(const ElfW(Ehdr) * ehdr, ElfW(Off) table_offset,
+ ElfW(Word) element_size, size_t index) {
+ return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+ + table_offset
+ + index * element_size);
+}
+
+} // namespace
+
+// The value of this variable doesn't matter; it's used only for its
+// unique address.
+const int ElfMemImage::kInvalidBaseSentinel = 0;
+
+ElfMemImage::ElfMemImage(const void *base) {
+ ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");
+ Init(base);
+}
+
+int ElfMemImage::GetNumSymbols() const {
+ if (!hash_) {
+ return 0;
+ }
+ // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
+ return hash_[1];
+}
+
+const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
+ ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+ return dynsym_ + index;
+}
+
+const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
+ ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+ return versym_ + index;
+}
+
+const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
+ ABSL_RAW_CHECK(index < ehdr_->e_phnum, "index out of range");
+ return GetTableElement<ElfW(Phdr)>(ehdr_,
+ ehdr_->e_phoff,
+ ehdr_->e_phentsize,
+ index);
+}
+
+const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
+ ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+ return dynstr_ + offset;
+}
+
+const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
+ if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
+ // Symbol corresponds to "special" (e.g. SHN_ABS) section.
+ return reinterpret_cast<const void *>(sym->st_value);
+ }
+ ABSL_RAW_CHECK(link_base_ < sym->st_value, "symbol out of range");
+ return GetTableElement<char>(ehdr_, 0, 1, sym->st_value - link_base_);
+}
+
+const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
+ ABSL_RAW_CHECK(0 <= index && static_cast<size_t>(index) <= verdefnum_,
+ "index out of range");
+ const ElfW(Verdef) *version_definition = verdef_;
+ while (version_definition->vd_ndx < index && version_definition->vd_next) {
+ const char *const version_definition_as_char =
+ reinterpret_cast<const char *>(version_definition);
+ version_definition =
+ reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
+ version_definition->vd_next);
+ }
+ return version_definition->vd_ndx == index ? version_definition : nullptr;
+}
+
+const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
+ const ElfW(Verdef) *verdef) const {
+ return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
+}
+
+const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
+ ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+ return dynstr_ + offset;
+}
+
+void ElfMemImage::Init(const void *base) {
+ ehdr_ = nullptr;
+ dynsym_ = nullptr;
+ dynstr_ = nullptr;
+ versym_ = nullptr;
+ verdef_ = nullptr;
+ hash_ = nullptr;
+ strsize_ = 0;
+ verdefnum_ = 0;
+ link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
+ if (!base) {
+ return;
+ }
+ const char *const base_as_char = reinterpret_cast<const char *>(base);
+ if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
+ base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
+ assert(false);
+ return;
+ }
+ int elf_class = base_as_char[EI_CLASS];
+ if (elf_class != kElfClass) {
+ assert(false);
+ return;
+ }
+ switch (base_as_char[EI_DATA]) {
+ case ELFDATA2LSB: {
+ if (__LITTLE_ENDIAN != __BYTE_ORDER) {
+ assert(false);
+ return;
+ }
+ break;
+ }
+ case ELFDATA2MSB: {
+ if (__BIG_ENDIAN != __BYTE_ORDER) {
+ assert(false);
+ return;
+ }
+ break;
+ }
+ default: {
+ assert(false);
+ return;
+ }
+ }
+
+ ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
+ const ElfW(Phdr) *dynamic_program_header = nullptr;
+ for (int i = 0; i < ehdr_->e_phnum; ++i) {
+ const ElfW(Phdr) *const program_header = GetPhdr(i);
+ switch (program_header->p_type) {
+ case PT_LOAD:
+ if (!~link_base_) {
+ link_base_ = program_header->p_vaddr;
+ }
+ break;
+ case PT_DYNAMIC:
+ dynamic_program_header = program_header;
+ break;
+ }
+ }
+ if (!~link_base_ || !dynamic_program_header) {
+ assert(false);
+ // Mark this image as not present. Cannot recurse infinitely.
+ Init(nullptr);
+ return;
+ }
+ ptrdiff_t relocation =
+ base_as_char - reinterpret_cast<const char *>(link_base_);
+ ElfW(Dyn) *dynamic_entry =
+ reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
+ relocation);
+ for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
+ const ElfW(Xword) value = dynamic_entry->d_un.d_val + relocation;
+ switch (dynamic_entry->d_tag) {
+ case DT_HASH:
+ hash_ = reinterpret_cast<ElfW(Word) *>(value);
+ break;
+ case DT_SYMTAB:
+ dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
+ break;
+ case DT_STRTAB:
+ dynstr_ = reinterpret_cast<const char *>(value);
+ break;
+ case DT_VERSYM:
+ versym_ = reinterpret_cast<ElfW(Versym) *>(value);
+ break;
+ case DT_VERDEF:
+ verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
+ break;
+ case DT_VERDEFNUM:
+ verdefnum_ = dynamic_entry->d_un.d_val;
+ break;
+ case DT_STRSZ:
+ strsize_ = dynamic_entry->d_un.d_val;
+ break;
+ default:
+ // Unrecognized entries explicitly ignored.
+ break;
+ }
+ }
+ if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
+ !verdef_ || !verdefnum_ || !strsize_) {
+ assert(false); // invalid VDSO
+ // Mark this image as not present. Cannot recurse infinitely.
+ Init(nullptr);
+ return;
+ }
+}
+
+bool ElfMemImage::LookupSymbol(const char *name,
+ const char *version,
+ int type,
+ SymbolInfo *info_out) const {
+ for (const SymbolInfo& info : *this) {
+ if (strcmp(info.name, name) == 0 && strcmp(info.version, version) == 0 &&
+ ElfType(info.symbol) == type) {
+ if (info_out) {
+ *info_out = info;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ElfMemImage::LookupSymbolByAddress(const void *address,
+ SymbolInfo *info_out) const {
+ for (const SymbolInfo& info : *this) {
+ const char *const symbol_start =
+ reinterpret_cast<const char *>(info.address);
+ const char *const symbol_end = symbol_start + info.symbol->st_size;
+ if (symbol_start <= address && address < symbol_end) {
+ if (info_out) {
+ // Client wants to know details for that symbol (the usual case).
+ if (ElfBind(info.symbol) == STB_GLOBAL) {
+ // Strong symbol; just return it.
+ *info_out = info;
+ return true;
+ } else {
+ // Weak or local. Record it, but keep looking for a strong one.
+ *info_out = info;
+ }
+ } else {
+ // Client only cares if there is an overlapping symbol.
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
+ : index_(index), image_(image) {
+}
+
+const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
+ return &info_;
+}
+
+const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
+ return info_;
+}
+
+bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
+ return this->image_ == rhs.image_ && this->index_ == rhs.index_;
+}
+
+bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
+ return !(*this == rhs);
+}
+
+ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
+ this->Update(1);
+ return *this;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::begin() const {
+ SymbolIterator it(this, 0);
+ it.Update(0);
+ return it;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::end() const {
+ return SymbolIterator(this, GetNumSymbols());
+}
+
+void ElfMemImage::SymbolIterator::Update(int increment) {
+ const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
+ ABSL_RAW_CHECK(image->IsPresent() || increment == 0, "");
+ if (!image->IsPresent()) {
+ return;
+ }
+ index_ += increment;
+ if (index_ >= image->GetNumSymbols()) {
+ index_ = image->GetNumSymbols();
+ return;
+ }
+ const ElfW(Sym) *symbol = image->GetDynsym(index_);
+ const ElfW(Versym) *version_symbol = image->GetVersym(index_);
+ ABSL_RAW_CHECK(symbol && version_symbol, "");
+ const char *const symbol_name = image->GetDynstr(symbol->st_name);
+ const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+ const ElfW(Verdef) *version_definition = nullptr;
+ const char *version_name = "";
+ if (symbol->st_shndx == SHN_UNDEF) {
+ // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
+ // version_index could well be greater than verdefnum_, so calling
+ // GetVerdef(version_index) may trigger an assertion.
+ } else {
+ version_definition = image->GetVerdef(version_index);
+ }
+ if (version_definition) {
+ // We expect 1 or 2 auxiliary entries: one for the version itself, and an
+ // optional second if the version has a parent.
+ ABSL_RAW_CHECK(
+ version_definition->vd_cnt == 1 || version_definition->vd_cnt == 2,
+ "wrong number of entries");
+ const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
+ version_name = image->GetVerstr(version_aux->vda_name);
+ }
+ info_.name = symbol_name;
+ info_.version = version_name;
+ info_.address = image->GetSymAddr(symbol);
+ info_.symbol = symbol;
+}
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_HAVE_ELF_MEM_IMAGE
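As a usage sketch: the primary client of this class is VDSOSupport, which hands ElfMemImage the base address of the kernel-supplied VDSO and then queries versioned symbols. Assuming a Linux/glibc target where that base is available from the auxiliary vector (the symbol name and version below are illustrative and platform-dependent):

#include <link.h>      // ElfW, STT_FUNC
#include <sys/auxv.h>  // getauxval, AT_SYSINFO_EHDR
#include <cstdio>
#include "absl/debugging/internal/elf_mem_image.h"

int main() {
  // The kernel maps the VDSO into every process; its base is in the auxv.
  const void* vdso_base =
      reinterpret_cast<const void*>(getauxval(AT_SYSINFO_EHDR));
  absl::debugging_internal::ElfMemImage image(vdso_base);
  if (!image.IsPresent()) return 1;

  absl::debugging_internal::ElfMemImage::SymbolInfo info;
  // A versioned lookup: name, version string, and ELF symbol type must match.
  if (image.LookupSymbol("__vdso_clock_gettime", "LINUX_2.6", STT_FUNC,
                         &info)) {
    std::printf("%s@%s at %p\n", info.name, info.version, info.address);
  }
  return 0;
}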
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
new file mode 100644
index 0000000..d84200d
--- /dev/null
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Allow dynamic symbol lookup for in-memory Elf images.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+#define ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+
+// Including this will define the __GLIBC__ macro if glibc is being
+// used.
+#include <climits>
+
+// Maybe one day we can rewrite this file not to require the elf
+// symbol extensions in glibc, but for right now we need them.
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
+#endif
+
+#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
+ !defined(__asmjs__) && !defined(__wasm__)
+#define ABSL_HAVE_ELF_MEM_IMAGE 1
+#endif
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+
+#include <link.h> // for ElfW
+
+namespace absl {
+namespace debugging_internal {
+
+// An in-memory ELF image (may not exist on disk).
+class ElfMemImage {
+ private:
+ // Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
+ static const int kInvalidBaseSentinel;
+
+ public:
+ // Sentinel: there could never be an elf image at this address.
+ static constexpr const void *const kInvalidBase =
+ static_cast<const void*>(&kInvalidBaseSentinel);
+
+ // Information about a single vdso symbol.
+ // All pointers are into .dynsym, .dynstr, or .text of the VDSO.
+ // Do not free() them or modify through them.
+ struct SymbolInfo {
+ const char *name; // E.g. "__vdso_getcpu"
+ const char *version; // E.g. "LINUX_2.6", could be ""
+ // for unversioned symbol.
+ const void *address; // Relocated symbol address.
+ const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
+ };
+
+ // Supports iteration over all dynamic symbols.
+ class SymbolIterator {
+ public:
+ friend class ElfMemImage;
+ const SymbolInfo *operator->() const;
+ const SymbolInfo &operator*() const;
+ SymbolIterator& operator++();
+ bool operator!=(const SymbolIterator &rhs) const;
+ bool operator==(const SymbolIterator &rhs) const;
+ private:
+ SymbolIterator(const void *const image, int index);
+ void Update(int incr);
+ SymbolInfo info_;
+ int index_;
+ const void *const image_;
+ };
+
+
+ explicit ElfMemImage(const void *base);
+ void Init(const void *base);
+ bool IsPresent() const { return ehdr_ != nullptr; }
+ const ElfW(Phdr)* GetPhdr(int index) const;
+ const ElfW(Sym)* GetDynsym(int index) const;
+ const ElfW(Versym)* GetVersym(int index) const;
+ const ElfW(Verdef)* GetVerdef(int index) const;
+ const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
+ const char* GetDynstr(ElfW(Word) offset) const;
+ const void* GetSymAddr(const ElfW(Sym) *sym) const;
+ const char* GetVerstr(ElfW(Word) offset) const;
+ int GetNumSymbols() const;
+
+ SymbolIterator begin() const;
+ SymbolIterator end() const;
+
+ // Look up versioned dynamic symbol in the image.
+ // Returns false if image is not present, or doesn't contain given
+ // symbol/version/type combination.
+ // If info_out is non-null, additional details are filled in.
+ bool LookupSymbol(const char *name, const char *version,
+ int symbol_type, SymbolInfo *info_out) const;
+
+ // Find info about symbol (if any) which overlaps given address.
+ // Returns true if symbol was found; false if image isn't present
+ // or doesn't have a symbol overlapping given address.
+ // If info_out is non-null, additional details are filled in.
+ bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+ private:
+ const ElfW(Ehdr) *ehdr_;
+ const ElfW(Sym) *dynsym_;
+ const ElfW(Versym) *versym_;
+ const ElfW(Verdef) *verdef_;
+ const ElfW(Word) *hash_;
+ const char *dynstr_;
+ size_t strsize_;
+ size_t verdefnum_;
+ ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
+};
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif // ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
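The begin()/end() iterators declared above also make the image usable directly in a range-for loop. A short sketch, assuming an already-constructed and present image such as the VDSO image from the previous sketch:

// Enumerate every dynamic symbol in the image.
for (const absl::debugging_internal::ElfMemImage::SymbolInfo& sym : image) {
  // name and version point into the image's .dynstr; do not free or modify.
  std::printf("%s (version '%s') -> %p\n", sym.name, sym.version, sym.address);
}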
diff --git a/absl/debugging/internal/examine_stack.cc b/absl/debugging/internal/examine_stack.cc
new file mode 100644
index 0000000..1ebc788
--- /dev/null
+++ b/absl/debugging/internal/examine_stack.cc
@@ -0,0 +1,153 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "absl/debugging/internal/examine_stack.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include <csignal>
+#include <cstdio>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/symbolize.h"
+
+namespace absl {
+namespace debugging_internal {
+
+// Returns the program counter from signal context, or nullptr if
+// unknown. vuc is a ucontext_t*. We use void* to avoid the use of
+// ucontext_t on non-POSIX systems.
+void* GetProgramCounter(void* vuc) {
+#ifdef __linux__
+ if (vuc != nullptr) {
+ ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
+#if defined(__aarch64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__arm__)
+ return reinterpret_cast<void*>(context->uc_mcontext.arm_pc);
+#elif defined(__i386__)
+ if (14 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[14]);
+#elif defined(__mips__)
+ return reinterpret_cast<void*>(context->uc_mcontext.pc);
+#elif defined(__powerpc64__)
+ return reinterpret_cast<void*>(context->uc_mcontext.gp_regs[32]);
+#elif defined(__powerpc__)
+ return reinterpret_cast<void*>(context->uc_mcontext.regs->nip);
+#elif defined(__s390__) && !defined(__s390x__)
+ return reinterpret_cast<void*>(context->uc_mcontext.psw.addr & 0x7fffffff);
+#elif defined(__s390__) && defined(__s390x__)
+ return reinterpret_cast<void*>(context->uc_mcontext.psw.addr);
+#elif defined(__x86_64__)
+ if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
+ return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
+#else
+#error "Undefined Architecture."
+#endif
+ }
+#elif defined(__akaros__)
+ auto* ctx = reinterpret_cast<struct user_context*>(vuc);
+ return reinterpret_cast<void*>(get_user_ctx_pc(ctx));
+#endif
+ static_cast<void>(vuc);
+ return nullptr;
+}
+
+// The %p field width for printf() functions is two characters per byte,
+// and two extra for the leading "0x".
+static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
+
+// Print a program counter, its stack frame size, and its symbol name.
+// Note that there is a separate symbolize_pc argument. Return addresses may be
+// at the end of the function, and this allows the caller to back up from pc if
+// appropriate.
+static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
+ void* writerfn_arg, void* pc,
+ void* symbolize_pc, int framesize,
+ const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, symbol);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize, symbol);
+ }
+ writerfn(buf, writerfn_arg);
+}
+
+// Print a program counter and the corresponding stack frame size.
+static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
+ void* writerfn_arg, void* pc, int framesize,
+ const char* const prefix) {
+ char buf[100];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
+ kPrintfPointerFieldWidth, pc);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize);
+ }
+ writerfn(buf, writerfn_arg);
+}
+
+void DumpPCAndFrameSizesAndStackTrace(
+ void* pc, void* const stack[], int frame_sizes[], int depth,
+ int min_dropped_frames, bool symbolize_stacktrace,
+ void (*writerfn)(const char*, void*), void* writerfn_arg) {
+ if (pc != nullptr) {
+ // We don't know the stack frame size for PC, use 0.
+ if (symbolize_stacktrace) {
+ DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
+ } else {
+ DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
+ }
+ }
+ for (int i = 0; i < depth; i++) {
+ if (symbolize_stacktrace) {
+ // Pass the previous address of pc as the symbol address because pc is a
+ // return address, and an overrun may occur when the function ends with a
+ // call to a function annotated noreturn (e.g. CHECK). Note that we don't
+ // do this for pc above, as the adjustment is only correct for return
+ // addresses.
+ DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
+ reinterpret_cast<char*>(stack[i]) - 1,
+ frame_sizes[i], " ");
+ } else {
+ DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
+ " ");
+ }
+ }
+ if (min_dropped_frames > 0) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n",
+ min_dropped_frames);
+ writerfn(buf, writerfn_arg);
+ }
+}
+
+} // namespace debugging_internal
+} // namespace absl
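These two dump helpers are what the failure signal handler builds on. A hedged sketch of a caller, assuming the public absl::GetStackFramesWithContext() entry point from absl/debugging/stacktrace.h and a ucontext pointer delivered to a signal handler (handler installation itself is omitted):

#include <unistd.h>
#include <cstring>
#include "absl/debugging/internal/examine_stack.h"
#include "absl/debugging/stacktrace.h"

// Writer callback: sends each formatted line to stderr.
static void WriteToStderr(const char* data, void* /*unused*/) {
  (void)write(STDERR_FILENO, data, strlen(data));
}

// Hypothetical helper called from a signal handler with its ucontext.
static void DumpStack(void* ucontext) {
  void* stack[32];
  int frame_sizes[32];
  int min_dropped = 0;
  int depth = absl::GetStackFramesWithContext(
      stack, frame_sizes, 32, /*skip_count=*/1, ucontext, &min_dropped);
  void* pc = absl::debugging_internal::GetProgramCounter(ucontext);
  absl::debugging_internal::DumpPCAndFrameSizesAndStackTrace(
      pc, stack, frame_sizes, depth, min_dropped,
      /*symbolize_stacktrace=*/true, WriteToStderr, nullptr);
}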
diff --git a/absl/debugging/internal/examine_stack.h b/absl/debugging/internal/examine_stack.h
new file mode 100644
index 0000000..56c9763
--- /dev/null
+++ b/absl/debugging/internal/examine_stack.h
@@ -0,0 +1,38 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
+#define ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
+
+namespace absl {
+namespace debugging_internal {
+
+// Returns the program counter from signal context, or nullptr if
+// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
+// ucontext_t on non-POSIX systems.
+void* GetProgramCounter(void* vuc);
+
+// Uses `writerfn` to dump the program counter, stack trace, and stack
+// frame sizes.
+void DumpPCAndFrameSizesAndStackTrace(
+ void* pc, void* const stack[], int frame_sizes[], int depth,
+ int min_dropped_frames, bool symbolize_stacktrace,
+ void (*writerfn)(const char*, void*), void* writerfn_arg);
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
diff --git a/absl/debugging/internal/stack_consumption.cc b/absl/debugging/internal/stack_consumption.cc
new file mode 100644
index 0000000..4b05f49
--- /dev/null
+++ b/absl/debugging/internal/stack_consumption.cc
@@ -0,0 +1,172 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/stack_consumption.h"
+
+#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <string.h>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+namespace debugging_internal {
+namespace {
+
+// This code requires that we know the direction in which the stack
+// grows. It is commonly believed that this can be detected by putting
+// a variable on the stack and then passing its address to a function
+// that compares the address of this variable to the address of a
+// variable on the function's own stack. However, this is unspecified
+// behavior in C++: If two pointers p and q of the same type point to
+// different objects that are not members of the same object or
+// elements of the same array or to different functions, or if only
+// one of them is null, the results of p<q, p>q, p<=q, and p>=q are
+// unspecified. Therefore, instead we hardcode the direction of the
+// stack on platforms we know about.
+#if defined(__i386__) || defined(__x86_64__) || defined(__ppc__)
+constexpr bool kStackGrowsDown = true;
+#else
+#error Need to define kStackGrowsDown
+#endif
+
+// To measure the stack footprint of some code, we create a signal handler
+// (for SIGUSR2 say) that exercises this code on an alternate stack. This
+// alternate stack is initialized to some known pattern (0x55, 0x55, 0x55,
+// ...). We then self-send this signal, and after the signal handler returns,
+// look at the alternate stack buffer to see what portion has been touched.
+//
+// This trick gives us the stack footprint of the signal handler. But the
+// signal handler, even before the code for it is exercised, consumes some
+// stack already. We however only want the stack usage of the code inside the
+// signal handler. To measure this accurately, we install two signal handlers:
+// one that does nothing and just returns, and the user-provided signal
+// handler. The difference between the stack consumption of these two signal
+// handlers should give us the stack footprint of interest.
+
+void EmptySignalHandler(int) {}
+
+// This is an arbitrary value, and could be increased further, at the cost of
+// memset()ting it all to a known sentinel value.
+constexpr int kAlternateStackSize = 64 << 10; // 64KiB
+
+constexpr int kSafetyMargin = 32;
+constexpr char kAlternateStackFillValue = 0x55;
+
+// This helper function looks at the alternate stack buffer and figures
+// out what portion of the buffer has been touched - this is the stack
+// consumption of the signal handler running on this alternate stack.
+// This function will return -1 if the alternate stack buffer has not been
+// touched. It will abort the program if the buffer has overflowed or is about
+// to overflow.
+int GetStackConsumption(const void* const altstack) {
+ const char* begin;
+ int increment;
+ if (kStackGrowsDown) {
+ begin = reinterpret_cast<const char*>(altstack);
+ increment = 1;
+ } else {
+ begin = reinterpret_cast<const char*>(altstack) + kAlternateStackSize - 1;
+ increment = -1;
+ }
+
+ for (int usage_count = kAlternateStackSize; usage_count > 0; --usage_count) {
+ if (*begin != kAlternateStackFillValue) {
+ ABSL_RAW_CHECK(usage_count <= kAlternateStackSize - kSafetyMargin,
+ "Buffer has overflowed or is about to overflow");
+ return usage_count;
+ }
+ begin += increment;
+ }
+
+ ABSL_RAW_LOG(FATAL, "Unreachable code");
+ return -1;
+}
+
+} // namespace
+
+int GetSignalHandlerStackConsumption(void (*signal_handler)(int)) {
+ // The alt-signal-stack cannot be heap allocated because there is a
+ // bug in glibc-2.2 where some signal handler setup code looks at the
+ // current stack pointer to figure out what thread is currently running.
+ // Therefore, the alternate stack must be allocated from the main stack
+ // itself.
+ void* altstack = mmap(nullptr, kAlternateStackSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ABSL_RAW_CHECK(altstack != MAP_FAILED, "mmap() failed");
+
+ // Set up the alt-signal-stack (and save the older one).
+ stack_t sigstk;
+ memset(&sigstk, 0, sizeof(sigstk));
+ stack_t old_sigstk;
+ sigstk.ss_sp = altstack;
+ sigstk.ss_size = kAlternateStackSize;
+ sigstk.ss_flags = 0;
+ ABSL_RAW_CHECK(sigaltstack(&sigstk, &old_sigstk) == 0,
+ "sigaltstack() failed");
+
+ // Set up SIGUSR1 and SIGUSR2 signal handlers (and save the older ones).
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ struct sigaction old_sa1, old_sa2;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_ONSTACK;
+
+ // SIGUSR1 maps to EmptySignalHandler.
+ sa.sa_handler = EmptySignalHandler;
+ ABSL_RAW_CHECK(sigaction(SIGUSR1, &sa, &old_sa1) == 0, "sigaction() failed");
+
+ // SIGUSR2 maps to signal_handler.
+ sa.sa_handler = signal_handler;
+ ABSL_RAW_CHECK(sigaction(SIGUSR2, &sa, &old_sa2) == 0, "sigaction() failed");
+
+ // Send SIGUSR1 signal and measure the stack consumption of the empty
+ // signal handler.
+ // The first signal might use more stack space. Run once and ignore the
+ // results to get that out of the way.
+ ABSL_RAW_CHECK(kill(getpid(), SIGUSR1) == 0, "kill() failed");
+
+ memset(altstack, kAlternateStackFillValue, kAlternateStackSize);
+ ABSL_RAW_CHECK(kill(getpid(), SIGUSR1) == 0, "kill() failed");
+ int base_stack_consumption = GetStackConsumption(altstack);
+
+ // Send SIGUSR2 signal and measure the stack consumption of signal_handler.
+ ABSL_RAW_CHECK(kill(getpid(), SIGUSR2) == 0, "kill() failed");
+ int signal_handler_stack_consumption = GetStackConsumption(altstack);
+
+ // Now restore the old alt-signal-stack and signal handlers.
+ ABSL_RAW_CHECK(sigaltstack(&old_sigstk, nullptr) == 0,
+ "sigaltstack() failed");
+ ABSL_RAW_CHECK(sigaction(SIGUSR1, &old_sa1, nullptr) == 0,
+ "sigaction() failed");
+ ABSL_RAW_CHECK(sigaction(SIGUSR2, &old_sa2, nullptr) == 0,
+ "sigaction() failed");
+
+ ABSL_RAW_CHECK(munmap(altstack, kAlternateStackSize) == 0, "munmap() failed");
+ if (signal_handler_stack_consumption != -1 && base_stack_consumption != -1) {
+ return signal_handler_stack_consumption - base_stack_consumption;
+ }
+ return -1;
+}
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
diff --git a/absl/debugging/internal/stack_consumption.h b/absl/debugging/internal/stack_consumption.h
new file mode 100644
index 0000000..b860a3c
--- /dev/null
+++ b/absl/debugging/internal/stack_consumption.h
@@ -0,0 +1,45 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Helper function for measuring stack consumption of signal handlers.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
+#define ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
+
+// The code in this module is not portable.
+// Use this feature test macro to detect its availability.
+#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+#error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly
+#elif !defined(__APPLE__) && !defined(_WIN32) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__ppc__))
+#define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1
+
+namespace absl {
+namespace debugging_internal {
+
+// Returns the stack consumption in bytes for the code exercised by
+// signal_handler. To measure stack consumption, signal_handler is registered
+// as a signal handler, so the code that it exercises must be async-signal
+// safe. The argument of signal_handler is an implementation detail of signal
+// handlers and should ignored by the code for signal_handler. Use global
+// variables to pass information between your test code and signal_handler.
+int GetSignalHandlerStackConsumption(void (*signal_handler)(int));
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
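Because the measured code runs inside a signal handler that only receives the signal number, inputs have to reach it some other way; the comment above suggests globals. A small sketch of that pattern (names are illustrative):

#include "absl/debugging/internal/stack_consumption.h"

// Input to the code under measurement, set before triggering the handler.
static const char* g_input = nullptr;

static void MeasuredHandler(int /*signo*/) {
  // ... exercise the code under test here, reading g_input ...
}

int MeasureOnce() {
  g_input = "some test input";
  // Returns the handler's stack usage in bytes, or -1 on failure.
  return absl::debugging_internal::GetSignalHandlerStackConsumption(
      MeasuredHandler);
}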
diff --git a/absl/debugging/internal/stack_consumption_test.cc b/absl/debugging/internal/stack_consumption_test.cc
new file mode 100644
index 0000000..68bfa12
--- /dev/null
+++ b/absl/debugging/internal/stack_consumption_test.cc
@@ -0,0 +1,48 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/internal/stack_consumption.h"
+
+#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+
+#include <string.h>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+namespace debugging_internal {
+namespace {
+
+static void SimpleSignalHandler(int signo) {
+ char buf[100];
+ memset(buf, 'a', sizeof(buf));
+
+ // Never true, but prevents compiler from optimizing buf out.
+ if (signo == 0) {
+ ABSL_RAW_LOG(INFO, "%p", static_cast<void*>(buf));
+ }
+}
+
+TEST(SignalHandlerStackConsumptionTest, MeasuresStackConsumption) {
+ // Our handler should consume a reasonable number of bytes.
+ EXPECT_GE(GetSignalHandlerStackConsumption(SimpleSignalHandler), 100);
+}
+
+} // namespace
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
new file mode 100644
index 0000000..7ed6b3e
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -0,0 +1,190 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+
+// Generate stack tracer for aarch64
+
+#if defined(__linux__)
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+
+#include "absl/base/attributes.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
+#include "absl/debugging/stacktrace.h"
+
+static const uintptr_t kUnknownFrameSize = 0;
+
+#if defined(__linux__)
+// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
+static const unsigned char* GetKernelRtSigreturnAddress() {
+ constexpr uintptr_t kImpossibleAddress = 1;
+ ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
+ uintptr_t address = memoized.load(std::memory_order_relaxed);
+ if (address != kImpossibleAddress) {
+ return reinterpret_cast<const unsigned char*>(address);
+ }
+
+ address = reinterpret_cast<uintptr_t>(nullptr);
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+ absl::debugging_internal::VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
+ if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC,
+ &symbol_info) ||
+ symbol_info.address == nullptr) {
+ // Unexpected: VDSO is present, yet the expected symbol is missing
+ // or null.
+ assert(false && "VDSO is present, but doesn't have expected symbol");
+ } else {
+ if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
+ kImpossibleAddress) {
+ address = reinterpret_cast<uintptr_t>(symbol_info.address);
+ } else {
+ assert(false && "VDSO returned invalid address");
+ }
+ }
+ }
+#endif
+
+ memoized.store(address, std::memory_order_relaxed);
+ return reinterpret_cast<const unsigned char*>(address);
+}
+#endif // __linux__
+
+// Compute the size of a stack frame in [low..high). We assume that
+// low < high. Returns kUnknownFrameSize if that assumption does not hold.
+template<typename T>
+static inline uintptr_t ComputeStackFrameSize(const T* low,
+ const T* high) {
+ const char* low_char_ptr = reinterpret_cast<const char *>(low);
+ const char* high_char_ptr = reinterpret_cast<const char *>(high);
+ return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
+static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+ void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
+ bool check_frame_size = true;
+
+#if defined(__linux__)
+ if (WITH_CONTEXT && uc != nullptr) {
+ // Check to see if next frame's return address is __kernel_rt_sigreturn.
+ if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
+ const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+ // old_frame_pointer[0] is not suitable for unwinding, look at
+ // ucontext to discover frame pointer before signal.
+ void **const pre_signal_frame_pointer =
+ reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
+
+ // Check that alleged frame pointer is actually readable. This is to
+ // prevent "double fault" in case we hit the first fault due to e.g.
+ // stack corruption.
+ if (!absl::debugging_internal::AddressIsReadable(
+ pre_signal_frame_pointer))
+ return nullptr;
+
+ // Alleged frame pointer is readable, use it for further unwinding.
+ new_frame_pointer = pre_signal_frame_pointer;
+
+ // Skip frame size check if we return from a signal. We may be using
+ // an alternate stack for signals.
+ check_frame_size = false;
+ }
+ }
+#endif
+
+ // aarch64 ABI requires stack pointer to be 16-byte-aligned.
+ if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
+ return nullptr;
+
+ // Check frame size. In strict mode, we assume frames to be under
+ // 100,000 bytes. In non-strict mode, we relax the limit to 1MB.
+ if (check_frame_size) {
+ const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+ const uintptr_t frame_size =
+ ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
+ if (frame_size == kUnknownFrameSize || frame_size > max_size)
+ return nullptr;
+ }
+
+ return new_frame_pointer;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+#ifdef __GNUC__
+ void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+ skip_count++; // Skip the frame for this function.
+ int n = 0;
+
+ // The frame pointer points to the low address of a frame. The first
+ // 64-bit word of a frame points to the next frame up the call chain,
+ // which normally is just after the high address of the current frame.
+ // The second word of a frame contains the return address of the caller.
+ // To find a pc value associated with the current frame, we need to go
+ // down a level in the call chain. So we remember the return address of
+ // the last frame seen. This does not work for the first stack frame,
+ // which belongs to UnwindImpl(), but we skip the frame for UnwindImpl()
+ // anyway.
+ void* prev_return_address = nullptr;
+
+ while (frame_pointer && n < max_depth) {
+ // The absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ void **next_frame_pointer =
+ NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = prev_return_address;
+ if (IS_STACK_FRAMES) {
+ sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+ }
+ n++;
+ }
+ prev_return_address = frame_pointer[1];
+ frame_pointer = next_frame_pointer;
+ }
+ if (min_dropped_frames != nullptr) {
+ // Implementation detail: we clamp the maximum number of frames we are
+ // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 200;
+ int j = 0;
+ for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ frame_pointer =
+ NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace absl {
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
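Each per-architecture UnwindImpl<>() like the one above is selected by stacktrace_config.h and reached through the public entry points declared in absl/debugging/stacktrace.h. A minimal caller-side sketch of the most common one:

#include <cstdio>
#include "absl/debugging/stacktrace.h"

void LogCurrentStack() {
  void* frames[64];
  // skip_count = 1 drops LogCurrentStack() itself from the result.
  int depth = absl::GetStackTrace(frames, 64, /*skip_count=*/1);
  for (int i = 0; i < depth; ++i) {
    std::printf("  @ %p\n", frames[i]);
  }
}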
diff --git a/absl/debugging/internal/stacktrace_arm-inl.inc b/absl/debugging/internal/stacktrace_arm-inl.inc
new file mode 100644
index 0000000..c840833
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_arm-inl.inc
@@ -0,0 +1,123 @@
+// Copyright 2011 and onwards Google Inc.
+// All rights reserved.
+//
+// Author: Doug Kwan
+// This is inspired by Craig Silverstein's PowerPC stacktrace code.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+
+#include <cstdint>
+
+#include "absl/debugging/stacktrace.h"
+
+// WARNING:
+// This only works if all your code is in either ARM or THUMB mode. With
+// interworking, the frame pointer of the caller can either be in r11 (ARM
+// mode) or r7 (THUMB mode). A callee only saves the frame pointer of its
+// mode in a fixed location on its stack frame. If the caller is a different
+// mode, there is no easy way to find the frame pointer. It can either be
+// still in the designated register or saved on stack along with other callee
+// saved registers.
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return nullptr if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+ void **new_sp = (void**) old_sp[-1];
+
+ // Check that the transition from frame pointer old_sp to frame
+ // pointer new_sp isn't clearly bogus
+ if (STRICT_UNWINDING) {
+ // With the stack growing downwards, older stack frame must be
+ // at a greater address than the current one.
+ if (new_sp <= old_sp) return nullptr;
+ // Assume stack frames larger than 100,000 bytes are bogus.
+ if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+ } else {
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_sp == old_sp) return nullptr;
+ // And allow frames up to about 1MB.
+ if ((new_sp > old_sp)
+ && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+ }
+ if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
+ return new_sp;
+}
+
+// This ensures that absl::GetStackTrace sets up the Link Register properly.
+#ifdef __GNUC__
+void StacktraceArmDummyFunction() __attribute__((noinline));
+void StacktraceArmDummyFunction() { __asm__ volatile(""); }
+#else
+# error StacktraceArmDummyFunction() needs to be ported to this platform.
+#endif
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void * /* ucp */, int *min_dropped_frames) {
+#ifdef __GNUC__
+ void **sp = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+ // On ARM, the return address is stored in the link register (r14).
+ // This is not saved on the stack frame of a leaf function. To
+ // simplify code that reads return addresses, we call a dummy
+ // function so that the return address of this function is also
+ // stored in the stack frame. This works at least for gcc.
+ StacktraceArmDummyFunction();
+
+ int n = 0;
+ while (sp && n < max_depth) {
+ // The absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ void **next_sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
+
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = *sp;
+
+ if (IS_STACK_FRAMES) {
+ if (next_sp > sp) {
+ sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
+ }
+ n++;
+ }
+ sp = next_sp;
+ }
+ if (min_dropped_frames != nullptr) {
+ // Implementation detail: we clamp the maximum number of frames we are
+ // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 200;
+ int j = 0;
+ for (; sp != nullptr && j < kMaxUnwind; j++) {
+ sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace absl {
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return false;
+}
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
diff --git a/absl/debugging/internal/stacktrace_config.h b/absl/debugging/internal/stacktrace_config.h
new file mode 100644
index 0000000..d4e8480
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_config.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+ * Defines ABSL_STACKTRACE_INL_HEADER to the *-inl.h containing
+ * actual unwinder implementation.
+ * This header is "private" to stacktrace.cc.
+ * DO NOT include it into any other files.
+*/
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+
+#if defined(ABSL_STACKTRACE_INL_HEADER)
+#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
+
+#elif defined(_WIN32)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_win32-inl.inc"
+
+#elif defined(__linux__) && !defined(__ANDROID__)
+
+#if !defined(NO_FRAME_POINTER)
+# if defined(__i386__) || defined(__x86_64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_x86-inl.inc"
+# elif defined(__ppc__) || defined(__PPC__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_powerpc-inl.inc"
+# elif defined(__aarch64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_aarch64-inl.inc"
+# elif defined(__arm__)
+// Note: When using glibc this may require -funwind-tables to function properly.
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_generic-inl.inc"
+# else
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+# endif
+#else // defined(NO_FRAME_POINTER)
+# if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_generic-inl.inc"
+# elif defined(__ppc__) || defined(__PPC__)
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_generic-inl.inc"
+# else
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+# endif
+#endif // NO_FRAME_POINTER
+
+#else
+#define ABSL_STACKTRACE_INL_HEADER \
+ "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+
+#endif
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
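As the comment notes, this header exists solely for stacktrace.cc, which consumes the macro roughly as sketched below (simplified; the exact error text in stacktrace.cc may differ):

#include "absl/debugging/internal/stacktrace_config.h"

#if defined(ABSL_STACKTRACE_INL_HEADER)
#include ABSL_STACKTRACE_INL_HEADER  // pulls in the matching UnwindImpl<>()
#else
#error Could not determine a stack unwinder for this platform.
#endif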
diff --git a/absl/debugging/internal/stacktrace_generic-inl.inc b/absl/debugging/internal/stacktrace_generic-inl.inc
new file mode 100644
index 0000000..81a49ef
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_generic-inl.inc
@@ -0,0 +1,97 @@
+// Copyright 2000 - 2007 Google Inc.
+// All rights reserved.
+//
+// Author: Sanjay Ghemawat
+//
+// Portable implementation - just use glibc
+//
+// Note: The glibc implementation may cause a call to malloc.
+// This can cause a deadlock in HeapProfiler.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+
+#include <execinfo.h>
+#include <atomic>
+#include <cstring>
+
+#include "absl/debugging/stacktrace.h"
+#include "absl/base/attributes.h"
+
+// Sometimes, we can try to get a stack trace from within a stack
+// trace, because we don't block signals inside this code (which would be too
+// expensive: the two extra system calls per stack trace do matter here).
+// That can cause a self-deadlock.
+// Protect against such reentrant call by failing to get a stack trace.
+//
+// We use __thread here because the code here is extremely low level -- it is
+// called while collecting stack traces from within malloc and mmap, and thus
+// can not call anything which might call malloc or mmap itself.
+static __thread int recursive = 0;
+
+// The stack trace function might be invoked very early in the program's
+// execution (e.g. from the very first malloc if using tcmalloc). Also, the
+// glibc implementation itself will trigger malloc the first time it is called.
+// As such, we suppress usage of backtrace during this early stage of execution.
+static std::atomic<bool> disable_stacktraces(true); // Disabled until healthy.
+// Waiting until static initializers run seems to be late enough.
+// This file is included into stacktrace.cc so this will only run once.
+ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
+ void* unused_stack[1];
+ // Force the first backtrace to happen early to get the one-time shared lib
+ // loading (allocation) out of the way. After the first call it is much safer
+ // to use backtrace from a signal handler if we crash somewhere later.
+ backtrace(unused_stack, 1);
+ disable_stacktraces.store(false, std::memory_order_relaxed);
+ return 0;
+}();
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
+ return 0;
+ }
+ ++recursive;
+
+ static_cast<void>(ucp); // Unused.
+ static const int kStackLength = 64;
+ void * stack[kStackLength];
+ int size;
+
+ size = backtrace(stack, kStackLength);
+ skip_count++; // we want to skip the current frame as well
+ int result_count = size - skip_count;
+ if (result_count < 0)
+ result_count = 0;
+ if (result_count > max_depth)
+ result_count = max_depth;
+ for (int i = 0; i < result_count; i++)
+ result[i] = stack[i + skip_count];
+
+ if (IS_STACK_FRAMES) {
+ // No implementation for finding out the stack frame sizes yet.
+ memset(sizes, 0, sizeof(*sizes) * result_count);
+ }
+ if (min_dropped_frames != nullptr) {
+ if (size - skip_count - max_depth > 0) {
+ *min_dropped_frames = size - skip_count - max_depth;
+ } else {
+ *min_dropped_frames = 0;
+ }
+ }
+
+ --recursive;
+
+ return result_count;
+}
+
+namespace absl {
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
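The generic implementation above is a thin wrapper over glibc's backtrace(). For comparison, the raw glibc API looks like the sketch below; note that backtrace_symbols() allocates with malloc, which is exactly the hazard the comments above warn about when tracing from inside allocators:

#include <execinfo.h>
#include <cstdio>
#include <cstdlib>

void PrintBacktraceDirectly() {
  void* frames[64];
  int depth = backtrace(frames, 64);
  // backtrace_symbols() malloc()s the returned array of strings.
  char** names = backtrace_symbols(frames, depth);
  if (names != nullptr) {
    for (int i = 0; i < depth; ++i) std::printf("%s\n", names[i]);
    free(names);
  }
}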
diff --git a/absl/debugging/internal/stacktrace_powerpc-inl.inc b/absl/debugging/internal/stacktrace_powerpc-inl.inc
new file mode 100644
index 0000000..3a070ee
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -0,0 +1,246 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace. I'm guessing (hoping!) the code is much like
+// for x86. For apple machines, at least, it seems to be; see
+// https://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html
+// https://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK
+// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+
+#if defined(__linux__)
+#include <asm/ptrace.h> // for PT_NIP.
+#include <ucontext.h> // for ucontext_t
+#endif
+
+#include <unistd.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+
+#include "absl/base/attributes.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
+
+// Given a stack pointer, return the saved link register value.
+// Note that this is the link register for a callee.
+static inline void *StacktracePowerPCGetLR(void **sp) {
+ // PowerPC has 3 main ABIs, which say where in the stack the
+ // Link Register is. For DARWIN and AIX (used by apple and
+ // linux ppc64), it's in sp[2]. For SYSV (used by linux ppc),
+ // it's in sp[1].
+#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
+ return *(sp+2);
+#elif defined(_CALL_SYSV)
+ return *(sp+1);
+#elif defined(__APPLE__) || defined(__FreeBSD__) || \
+ (defined(__linux__) && defined(__PPC64__))
+ // This check is in case the compiler doesn't define _CALL_AIX/etc.
+ return *(sp+2);
+#elif defined(__linux)
+ // This check is in case the compiler doesn't define _CALL_SYSV.
+ return *(sp+1);
+#else
+#error Need to specify the PPC ABI for your architecture.
+#endif
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_sp, const void *uc) {
+ void **new_sp = (void **) *old_sp;
+ enum { kStackAlignment = 16 };
+
+ // Check that the transition from frame pointer old_sp to frame
+ // pointer new_sp isn't clearly bogus
+ if (STRICT_UNWINDING) {
+ // With the stack growing downwards, older stack frame must be
+ // at a greater address than the current one.
+ if (new_sp <= old_sp) return nullptr;
+ // Assume stack frames larger than 100,000 bytes are bogus.
+ if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+ } else {
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_sp == old_sp) return nullptr;
+ // And allow frames up to about 1MB.
+ if ((new_sp > old_sp)
+ && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+ }
+ if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr;
+
+#if defined(__linux__)
+ enum StackTraceKernelSymbolStatus {
+ kNotInitialized = 0, kAddressValid, kAddressInvalid };
+
+ if (IS_WITH_CONTEXT && uc != nullptr) {
+ static StackTraceKernelSymbolStatus kernel_symbol_status =
+ kNotInitialized; // Sentinel: not computed yet.
+ // Initialize with sentinel value: __kernel_rt_sigtramp_rt64 can not
+ // possibly be there.
+ static const unsigned char *kernel_sigtramp_rt64_address = nullptr;
+ if (kernel_symbol_status == kNotInitialized) {
+ absl::debugging_internal::VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ absl::debugging_internal::VDSOSupport::SymbolInfo
+ sigtramp_rt64_symbol_info;
+ if (!vdso.LookupSymbol(
+ "__kernel_sigtramp_rt64", "LINUX_2.6.15",
+ absl::debugging_internal::VDSOSupport::kVDSOSymbolType,
+ &sigtramp_rt64_symbol_info) ||
+ sigtramp_rt64_symbol_info.address == nullptr) {
+ // Unexpected: VDSO is present, yet the expected symbol is missing
+ // or null.
+ assert(false && "VDSO is present, but doesn't have expected symbol");
+ kernel_symbol_status = kAddressInvalid;
+ } else {
+ kernel_sigtramp_rt64_address =
+ reinterpret_cast<const unsigned char *>(
+ sigtramp_rt64_symbol_info.address);
+ kernel_symbol_status = kAddressValid;
+ }
+ } else {
+ kernel_symbol_status = kAddressInvalid;
+ }
+ }
+
+ if (new_sp != nullptr &&
+ kernel_symbol_status == kAddressValid &&
+ StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) {
+ const ucontext_t* signal_context =
+ reinterpret_cast<const ucontext_t*>(uc);
+ void **const sp_before_signal =
+ reinterpret_cast<void**>(signal_context->uc_mcontext.gp_regs[PT_R1]);
+ // Check that alleged sp before signal is nonnull and is reasonably
+ // aligned.
+ if (sp_before_signal != nullptr &&
+ ((uintptr_t)sp_before_signal % kStackAlignment) == 0) {
+ // Check that alleged stack pointer is actually readable. This is to
+ // prevent a "double fault" in case we hit the first fault due to e.g.
+ // a stack corruption.
+ if (absl::debugging_internal::AddressIsReadable(sp_before_signal)) {
+ // Alleged stack pointer is readable, use it for further unwinding.
+ new_sp = sp_before_signal;
+ }
+ }
+ }
+ }
+#endif
+
+ return new_sp;
+}
+
+// This ensures that absl::GetStackTrace sets up the Link Register properly.
+ABSL_ATTRIBUTE_NOINLINE static void AbslStacktracePowerPCDummyFunction() {
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ void **sp;
+ // Apple macOS uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
+ // and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a
+ // different asm syntax. I don't know quite the best way to discriminate
+ // systems using the old as from the new one; I've gone with __APPLE__.
+#ifdef __APPLE__
+ __asm__ volatile ("mr %0,r1" : "=r" (sp));
+#else
+ __asm__ volatile ("mr %0,1" : "=r" (sp));
+#endif
+
+  // On PowerPC, the "Link Register" or "Link Record" (LR) is a stack
+ // entry that holds the return address of the subroutine call (what
+ // instruction we run after our function finishes). This is the
+ // same as the stack-pointer of our parent routine, which is what we
+ // want here. While the compiler will always(?) set up LR for
+ // subroutine calls, it may not for leaf functions (such as this one).
+ // This routine forces the compiler (at least gcc) to push it anyway.
+ AbslStacktracePowerPCDummyFunction();
+
+ // The LR save area is used by the callee, so the top entry is bogus.
+ skip_count++;
+
+ int n = 0;
+
+ // Unlike ABIs of X86 and ARM, PowerPC ABIs say that return address (in
+ // the link register) of a function call is stored in the caller's stack
+ // frame instead of the callee's. When we look for the return address
+ // associated with a stack frame, we need to make sure that there is a
+ // caller frame before it. So we call NextStackFrame before entering the
+ // loop below and check next_sp instead of sp for loop termination.
+ // The outermost frame is set up by runtimes and it does not have a
+ // caller frame, so it is skipped.
+
+ // The absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few
+ // bogus entries in some rare cases).
+ void **next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+
+ while (next_sp && n < max_depth) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = StacktracePowerPCGetLR(sp);
+ if (IS_STACK_FRAMES) {
+ if (next_sp > sp) {
+ sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
+ }
+ n++;
+ }
+
+ sp = next_sp;
+ next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+ }
+
+ if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 1000;
+ int j = 0;
+ for (; next_sp != nullptr && j < kMaxUnwind; j++) {
+ next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace absl {
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
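As a rough illustration of the back-chain walk that the PowerPC UnwindImpl above performs, here is a minimal sketch (not part of the library): it assumes the layout described earlier, where sp[0] holds the back-chain pointer and the saved LR sits at a fixed offset (2 for the AIX/Darwin-style layout, 1 for 32-bit SYSV); the real implementation adds the strict sanity checks, signal-frame handling, and frame skipping shown above.

#include <cstdio>

// Hypothetical helper; 'lr_offset' selects the LR save slot for the ABI.
static void SketchPowerPCWalk(void **sp, int max_depth, int lr_offset) {
  for (int n = 0; sp != nullptr && n < max_depth; ++n) {
    std::printf("frame %d: return address %p\n", n, sp[lr_offset]);
    void **next = reinterpret_cast<void **>(sp[0]);  // back-chain word
    if (next <= sp) break;  // stack grows down, so a caller frame must be higher
    sp = next;
  }
}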
diff --git a/absl/debugging/internal/stacktrace_unimplemented-inl.inc b/absl/debugging/internal/stacktrace_unimplemented-inl.inc
new file mode 100644
index 0000000..e256fdd
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_unimplemented-inl.inc
@@ -0,0 +1,22 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** /* result */, int* /* sizes */,
+ int /* max_depth */, int /* skip_count */,
+ const void* /* ucp */, int *min_dropped_frames) {
+ if (min_dropped_frames != nullptr) {
+ *min_dropped_frames = 0;
+ }
+ return 0;
+}
+
+namespace absl {
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return false;
+}
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
diff --git a/absl/debugging/internal/stacktrace_win32-inl.inc b/absl/debugging/internal/stacktrace_win32-inl.inc
new file mode 100644
index 0000000..b46491f
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_win32-inl.inc
@@ -0,0 +1,83 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produces a stack trace for Windows. Normally, one could use
+// stacktrace_x86-inl.h or stacktrace_x86_64-inl.h -- and indeed, that
+// should work for binaries compiled using MSVC in "debug" mode.
+// However, in "release" mode, Windows uses frame-pointer
+// optimization, which makes getting a stack trace very difficult.
+//
+// There are several approaches one can take. One is to use Windows
+// intrinsics like StackWalk64. These can work, but have restrictions
+// on how successful they can be. Another attempt is to write a
+// version of stacktrace_x86-inl.h that has heuristic support for
+// dealing with FPO, similar to what WinDbg does (see
+// http://www.nynaeve.net/?p=97). There are (non-working) examples of
+// these approaches, complete with TODOs, in stacktrace_win32-inl.h#1
+//
+// The solution we've ended up with is to call the undocumented
+// Windows function RtlCaptureStackBackTrace, which probably doesn't
+// work with FPO but at least is fast, and doesn't require a symbol
+// server.
+//
+// This code is inspired by a patch from David Vitek:
+// https://code.google.com/p/google-perftools/issues/detail?id=83
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+
+#include <windows.h> // for GetProcAddress and GetModuleHandle
+#include <cassert>
+#include <cstring>  // for memset
+
+typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
+ IN ULONG frames_to_skip,
+ IN ULONG frames_to_capture,
+ OUT PVOID *backtrace,
+ OUT PULONG backtrace_hash);
+
+// Load the function we need at static init time, where we don't have
+// to worry about someone else holding the loader's lock.
+static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
+ (RtlCaptureStackBackTrace_Function*)
+ GetProcAddress(GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace");
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ int n = 0;
+ if (!RtlCaptureStackBackTrace_fn) {
+    // Can't produce a stack trace with no function to call.
+ } else {
+ n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0);
+ }
+ if (IS_STACK_FRAMES) {
+ // No implementation for finding out the stack frame sizes yet.
+ memset(sizes, 0, sizeof(*sizes) * n);
+ }
+ if (min_dropped_frames != nullptr) {
+ // Not implemented.
+ *min_dropped_frames = 0;
+ }
+ return n;
+}
+
+namespace absl {
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return false;
+}
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
diff --git a/absl/debugging/internal/stacktrace_x86-inl.inc b/absl/debugging/internal/stacktrace_x86-inl.inc
new file mode 100644
index 0000000..9494441
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -0,0 +1,340 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+
+#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
+#include <ucontext.h> // for ucontext_t
+#endif
+
+#if !defined(_WIN32)
+#include <unistd.h>
+#endif
+
+#include <cassert>
+#include <cstdint>
+
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
+#include "absl/debugging/stacktrace.h"
+
+#include "absl/base/internal/raw_logging.h"
+
+using absl::debugging_internal::AddressIsReadable;
+
+#if defined(__linux__) && defined(__i386__)
+// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
+// preceding "syscall" or "sysenter".
+// If __kernel_vsyscall uses frame pointer, answer 0.
+//
+// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
+// to analyze before giving up. Up to kMaxBytes+1 bytes of
+// instructions could be accessed.
+//
+// Here are known __kernel_vsyscall instruction sequences:
+//
+// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
+// Used on Intel.
+// 0xffffe400 <__kernel_vsyscall+0>: push %ecx
+// 0xffffe401 <__kernel_vsyscall+1>: push %edx
+// 0xffffe402 <__kernel_vsyscall+2>: push %ebp
+// 0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp
+// 0xffffe405 <__kernel_vsyscall+5>: sysenter
+//
+// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
+// Used on AMD.
+// 0xffffe400 <__kernel_vsyscall+0>: push %ebp
+// 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
+// 0xffffe403 <__kernel_vsyscall+3>: syscall
+//
+
+// The sequence below is not expected in practice and is listed only for
+// completeness:
+
+// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
+// 0xffffe400 <__kernel_vsyscall+0>: int $0x80
+// 0xffffe401 <__kernel_vsyscall+1>: ret
+//
+static const int kMaxBytes = 10;
+
+// We use assert()s instead of DCHECK()s -- this is too low level
+// for DCHECK().
+
+static int CountPushInstructions(const unsigned char *const addr) {
+ int result = 0;
+ for (int i = 0; i < kMaxBytes; ++i) {
+ if (addr[i] == 0x89) {
+ // "mov reg,reg"
+ if (addr[i + 1] == 0xE5) {
+ // Found "mov %esp,%ebp".
+ return 0;
+ }
+ ++i; // Skip register encoding byte.
+ } else if (addr[i] == 0x0F &&
+ (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
+ // Found "sysenter" or "syscall".
+ return result;
+ } else if ((addr[i] & 0xF0) == 0x50) {
+ // Found "push %reg".
+ ++result;
+ } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
+ // Found "int $0x80"
+ assert(result == 0);
+ return 0;
+ } else {
+ // Unexpected instruction.
+ assert(false && "unexpected instruction in __kernel_vsyscall");
+ return 0;
+ }
+ }
+ // Unexpected: didn't find SYSENTER or SYSCALL in
+ // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
+ assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
+ return 0;
+}
+#endif
+
+// Assume stack frames larger than 100,000 bytes are bogus.
+static const int kMaxFrameBytes = 100000;
+
+// Returns the stack frame pointer from signal context, 0 if unknown.
+// vuc is a ucontext_t *. We use void* to avoid the use
+// of ucontext_t on non-POSIX systems.
+static uintptr_t GetFP(const void *vuc) {
+#if !defined(__linux__)
+ static_cast<void>(vuc); // Avoid an unused argument compiler warning.
+#else
+ if (vuc != nullptr) {
+ auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
+#if defined(__i386__)
+ const auto bp = uc->uc_mcontext.gregs[REG_EBP];
+ const auto sp = uc->uc_mcontext.gregs[REG_ESP];
+#elif defined(__x86_64__)
+ const auto bp = uc->uc_mcontext.gregs[REG_RBP];
+ const auto sp = uc->uc_mcontext.gregs[REG_RSP];
+#else
+ const uintptr_t bp = 0;
+ const uintptr_t sp = 0;
+#endif
+    // Sanity-check that the base pointer is valid. It should be, as long as
+ // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code in
+ // the process is compiled with --copt=-fomit-frame-pointer or
+ // --copt=-momit-leaf-frame-pointer.
+ //
+ // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
+ // behavior when building with clang. Talk to the C++ toolchain team about
+ // fixing that.
+ if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;
+
+ // If bp isn't a plausible frame pointer, return the stack pointer instead.
+ // If we're lucky, it points to the start of a stack frame; otherwise, we'll
+ // get one frame of garbage in the stack trace and fail the sanity check on
+ // the next iteration.
+ return sp;
+ }
+#endif
+ return 0;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_fp, const void *uc) {
+ void **new_fp = (void **)*old_fp;
+
+#if defined(__linux__) && defined(__i386__)
+ if (WITH_CONTEXT && uc != nullptr) {
+ // How many "push %reg" instructions are there at __kernel_vsyscall?
+ // This is constant for a given kernel and processor, so compute
+ // it only once.
+ static int num_push_instructions = -1; // Sentinel: not computed yet.
+    // Initialize with sentinel value: __kernel_rt_sigreturn cannot possibly
+ // be there.
+ static const unsigned char *kernel_rt_sigreturn_address = nullptr;
+ static const unsigned char *kernel_vsyscall_address = nullptr;
+ if (num_push_instructions == -1) {
+ absl::debugging_internal::VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ absl::debugging_internal::VDSOSupport::SymbolInfo
+ rt_sigreturn_symbol_info;
+ absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
+ if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
+ &rt_sigreturn_symbol_info) ||
+ !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
+ &vsyscall_symbol_info) ||
+ rt_sigreturn_symbol_info.address == nullptr ||
+ vsyscall_symbol_info.address == nullptr) {
+ // Unexpected: 32-bit VDSO is present, yet one of the expected
+ // symbols is missing or null.
+ assert(false && "VDSO is present, but doesn't have expected symbols");
+ num_push_instructions = 0;
+ } else {
+ kernel_rt_sigreturn_address =
+ reinterpret_cast<const unsigned char *>(
+ rt_sigreturn_symbol_info.address);
+ kernel_vsyscall_address =
+ reinterpret_cast<const unsigned char *>(
+ vsyscall_symbol_info.address);
+ num_push_instructions =
+ CountPushInstructions(kernel_vsyscall_address);
+ }
+ } else {
+ num_push_instructions = 0;
+ }
+ }
+ if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
+ old_fp[1] == kernel_rt_sigreturn_address) {
+ const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+      // This kernel does not use a frame pointer in its VDSO code,
+      // and so %ebp is not suitable for unwinding.
+ void **const reg_ebp =
+ reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
+ const unsigned char *const reg_eip =
+ reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
+ if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
+ reg_eip - kernel_vsyscall_address < kMaxBytes) {
+ // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
+ // Restore from 'ucv' instead.
+ void **const reg_esp =
+ reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
+ // Check that alleged %esp is not null and is reasonably aligned.
+ if (reg_esp &&
+ ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
+ // Check that alleged %esp is actually readable. This is to prevent
+ // "double fault" in case we hit the first fault due to e.g. stack
+ // corruption.
+ void *const reg_esp2 = reg_esp[num_push_instructions - 1];
+ if (AddressIsReadable(reg_esp2)) {
+ // Alleged %esp is readable, use it for further unwinding.
+ new_fp = reinterpret_cast<void **>(reg_esp2);
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
+ const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);
+
+ // Check that the transition from frame pointer old_fp to frame
+ // pointer new_fp isn't clearly bogus. Skip the checks if new_fp
+ // matches the signal context, so that we don't skip out early when
+ // using an alternate signal stack.
+ //
+ // TODO(bcmills): The GetFP call should be completely unnecessary when
+ // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's
+ // stack by this point), but it is empirically still needed (e.g. when the
+ // stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some
+ // frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what
+ // it's supposed to.
+ if (STRICT_UNWINDING &&
+ (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
+    // With the stack growing downwards, an older stack frame must be
+    // at a greater address than the current one.
+ if (new_fp_u <= old_fp_u) return nullptr;
+ if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
+ } else {
+ if (new_fp == nullptr) return nullptr; // skip AddressIsReadable() below
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_fp == old_fp) return nullptr;
+ }
+
+ if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
+#ifdef __i386__
+ // On 32-bit machines, the stack pointer can be very close to
+ // 0xffffffff, so we explicitly check for a pointer into the
+ // last two pages in the address space
+ if (new_fp_u >= 0xffffe000) return nullptr;
+#endif
+#if !defined(_WIN32)
+ if (!STRICT_UNWINDING) {
+ // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
+ // on AMD-based machines with VDSO-enabled kernels.
+    // Make an extra sanity check to ensure new_fp is readable.
+ // Note: NextStackFrame<false>() is only called while the program
+ // is already on its last leg, so it's ok to be slow here.
+
+ if (!AddressIsReadable(new_fp)) {
+ return nullptr;
+ }
+ }
+#endif
+ return new_fp;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+ABSL_ATTRIBUTE_NOINLINE
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+ int n = 0;
+ void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
+
+ while (fp && n < max_depth) {
+ if (*(fp + 1) == reinterpret_cast<void *>(0)) {
+ // In 64-bit code, we often see a frame that
+ // points to itself and has a return address of 0.
+ break;
+ }
+ void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = *(fp + 1);
+ if (IS_STACK_FRAMES) {
+ if (next_fp > fp) {
+ sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
+ }
+ n++;
+ }
+ fp = next_fp;
+ }
+ if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the maximum number of frames we are
+    // willing to count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 1000;
+ int j = 0;
+ for (; fp != nullptr && j < kMaxUnwind; j++) {
+ fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+ }
+ *min_dropped_frames = j;
+ }
+ return n;
+}
+
+namespace absl {
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return true;
+}
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
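For orientation, a minimal sketch of the frame-pointer walk that the x86 UnwindImpl above is built around, assuming the binary keeps frame pointers (e.g. -fno-omit-frame-pointer) and a GCC/Clang toolchain; the helper name is illustrative, and the real code adds the VDSO, signal-context, and readability checks shown above.

#include <cstdio>

__attribute__((noinline)) static void SketchX86Walk(int max_depth) {
  // With frame pointers enabled, fp[0] is the caller's frame pointer and
  // fp[1] is the return address into the caller.
  void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
  for (int n = 0; fp != nullptr && n < max_depth; ++n) {
    std::printf("frame %d: return address %p\n", n, fp[1]);
    void **next = reinterpret_cast<void **>(fp[0]);
    if (next <= fp) break;  // reject frames that do not move up the stack
    fp = next;
  }
}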
diff --git a/absl/debugging/internal/symbolize.h b/absl/debugging/internal/symbolize.h
new file mode 100644
index 0000000..3e53789
--- /dev/null
+++ b/absl/debugging/internal/symbolize.h
@@ -0,0 +1,122 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains internal parts of the Abseil symbolizer.
+// Do not depend on anything in this file; it may change at any time.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
+#define ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
+#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set
+#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
+ !defined(__asmjs__) && !defined(__wasm__)
+#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1
+
+#include <elf.h>
+#include <link.h> // For ElfW() macro.
+#include <functional>
+#include <string>
+
+namespace absl {
+namespace debugging_internal {
+
+// Iterates over all sections, invoking callback on each with the section name
+// and the section header.
+//
+// Returns true on success; otherwise returns false in case of errors.
+//
+// This is not async-signal-safe.
+bool ForEachSection(int fd,
+ const std::function<bool(const std::string& name,
+ const ElfW(Shdr) &)>& callback);
+
+// Gets the section header for the given name, if it exists. Returns true on
+// success. Otherwise, returns false.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ ElfW(Shdr) *out);
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
+
+namespace absl {
+namespace debugging_internal {
+
+struct SymbolDecoratorArgs {
+ // The program counter we are getting symbolic name for.
+ const void *pc;
+ // 0 for main executable, load address for shared libraries.
+ ptrdiff_t relocation;
+ // Read-only file descriptor for ELF image covering "pc",
+ // or -1 if no such ELF image exists in /proc/self/maps.
+ int fd;
+ // Output buffer, size.
+ // Note: the buffer may not be empty -- default symbolizer may have already
+ // produced some output, and earlier decorators may have adorned it in
+ // some way. You are free to replace or augment the contents (within the
+ // symbol_buf_size limit).
+ char *const symbol_buf;
+ size_t symbol_buf_size;
+ // Temporary scratch space, size.
+ // Use that space in preference to allocating your own stack buffer to
+ // conserve stack.
+ char *const tmp_buf;
+ size_t tmp_buf_size;
+ // User-provided argument
+ void* arg;
+};
+using SymbolDecorator = void (*)(const SymbolDecoratorArgs *);
+
+// Installs a function-pointer as a decorator. Returns a value less than zero
+// if the system cannot install the decorator. Otherwise, returns a unique
+// identifier corresponding to the decorator. This identifier can be used to
+// uninstall the decorator - See RemoveSymbolDecorator() below.
+int InstallSymbolDecorator(SymbolDecorator decorator, void* arg);
+
+// Removes a previously installed function-pointer decorator. Parameter "ticket"
+// is the return-value from calling InstallSymbolDecorator().
+bool RemoveSymbolDecorator(int ticket);
+
+// Remove all installed decorators. Returns true if successful, false if
+// symbolization is currently in progress.
+bool RemoveAllSymbolDecorators(void);
+
+// Registers an address range to a file mapping.
+//
+// Preconditions:
+// start <= end
+// filename != nullptr
+//
+// Returns true if the file was successfully registered.
+bool RegisterFileMappingHint(
+ const void* start, const void* end, uint64_t offset, const char* filename);
+
+// Looks up the file mapping registered by RegisterFileMappingHint for an
+// address range. If there is one, the file name is stored in *filename and
+// *start and *end are modified to reflect the registered mapping. Returns
+// whether any hint was found.
+bool GetFileMappingHint(const void** start,
+ const void** end,
+ uint64_t * offset,
+ const char** filename);
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
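A minimal sketch of a symbol decorator using the hooks declared above; the tag text, function name, and installation site are illustrative assumptions, not part of the library.

#include <cstring>

#include "absl/debugging/internal/symbolize.h"

// Appends a fixed tag to every symbolized frame, if it fits in the buffer.
void TagDecorator(const absl::debugging_internal::SymbolDecoratorArgs *args) {
  static const char kTag[] = " [mytag]";
  const size_t used = std::strlen(args->symbol_buf);
  if (used + sizeof(kTag) <= args->symbol_buf_size) {
    std::memcpy(args->symbol_buf + used, kTag, sizeof(kTag));  // copies the '\0'
  }
}

// Typically installed once at startup; the returned ticket can later be passed
// to RemoveSymbolDecorator():
//   int ticket =
//       absl::debugging_internal::InstallSymbolDecorator(&TagDecorator, nullptr);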
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
new file mode 100644
index 0000000..d13ef25
--- /dev/null
+++ b/absl/debugging/internal/vdso_support.cc
@@ -0,0 +1,192 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+
+#include "absl/debugging/internal/vdso_support.h"
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#if __GLIBC_PREREQ(2, 16) // GLIBC-2.16 implements getauxval.
+#include <sys/auxv.h>
+#endif
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/port.h"
+
+#ifndef AT_SYSINFO_EHDR
+#define AT_SYSINFO_EHDR 33 // for crosstoolv10
+#endif
+
+namespace absl {
+namespace debugging_internal {
+
+ABSL_CONST_INIT
+std::atomic<const void *> VDSOSupport::vdso_base_(
+ debugging_internal::ElfMemImage::kInvalidBase);
+
+std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
+VDSOSupport::VDSOSupport()
+ // If vdso_base_ is still set to kInvalidBase, we got here
+ // before VDSOSupport::Init has been called. Call it now.
+ : image_(vdso_base_.load(std::memory_order_relaxed) ==
+ debugging_internal::ElfMemImage::kInvalidBase
+ ? Init()
+ : vdso_base_.load(std::memory_order_relaxed)) {}
+
+// NOTE: we can't use GoogleOnceInit() below, because we can be
+// called by tcmalloc, and none of the *once* stuff may be functional yet.
+//
+// In addition, we hope that the VDSOSupportHelper constructor
+// causes this code to run before there are any threads, and before
+// InitGoogle() has executed any chroot or setuid calls.
+//
+// Finally, even if there is a race here, it is harmless, because
+// the operation should be idempotent.
+const void *VDSOSupport::Init() {
+ const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
+#if __GLIBC_PREREQ(2, 16)
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ errno = 0;
+ const void *const sysinfo_ehdr =
+ reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
+ if (errno == 0) {
+ vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
+ }
+ }
+#endif // __GLIBC_PREREQ(2, 16)
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
+ // on stack, and so glibc works as if VDSO was not present.
+ // But going directly to kernel via /proc/self/auxv below bypasses
+ // Valgrind zapping. So we check for Valgrind separately.
+ if (RunningOnValgrind()) {
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+ return nullptr;
+ }
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd == -1) {
+ // Kernel too old to have a VDSO.
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+ return nullptr;
+ }
+ ElfW(auxv_t) aux;
+ while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
+ if (aux.a_type == AT_SYSINFO_EHDR) {
+ vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
+ std::memory_order_relaxed);
+ break;
+ }
+ }
+ close(fd);
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ // Didn't find AT_SYSINFO_EHDR in auxv[].
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ }
+ }
+ GetCpuFn fn = &GetCPUViaSyscall; // default if VDSO not present.
+ if (vdso_base_.load(std::memory_order_relaxed)) {
+ VDSOSupport vdso;
+ SymbolInfo info;
+ if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+ fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
+ }
+ }
+ // Subtle: this code runs outside of any locks; prevent compiler
+ // from assigning to getcpu_fn_ more than once.
+ getcpu_fn_.store(fn, std::memory_order_relaxed);
+ return vdso_base_.load(std::memory_order_relaxed);
+}
+
+const void *VDSOSupport::SetBase(const void *base) {
+ ABSL_RAW_CHECK(base != debugging_internal::ElfMemImage::kInvalidBase,
+ "internal error");
+ const void *old_base = vdso_base_.load(std::memory_order_relaxed);
+ vdso_base_.store(base, std::memory_order_relaxed);
+ image_.Init(base);
+ // Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
+ getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
+ return old_base;
+}
+
+bool VDSOSupport::LookupSymbol(const char *name,
+ const char *version,
+ int type,
+ SymbolInfo *info) const {
+ return image_.LookupSymbol(name, version, type, info);
+}
+
+bool VDSOSupport::LookupSymbolByAddress(const void *address,
+ SymbolInfo *info_out) const {
+ return image_.LookupSymbolByAddress(address, info_out);
+}
+
+// NOLINT on 'long' because this routine mimics kernel api.
+long VDSOSupport::GetCPUViaSyscall(unsigned *cpu, // NOLINT(runtime/int)
+ void *, void *) {
+#ifdef SYS_getcpu
+ return syscall(SYS_getcpu, cpu, nullptr, nullptr);
+#else
+ // x86_64 never implemented sys_getcpu(), except as a VDSO call.
+ static_cast<void>(cpu); // Avoid an unused argument compiler warning.
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+// Use fast __vdso_getcpu if available.
+long VDSOSupport::InitAndGetCPU(unsigned *cpu, // NOLINT(runtime/int)
+ void *x, void *y) {
+ Init();
+ GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
+ ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
+ return (*fn)(cpu, x, y);
+}
+
+// This function must be very fast, and may be called from very
+// low level (e.g. tcmalloc). Hence I avoid things like
+// GoogleOnceInit() and ::operator new.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+int GetCPU() {
+ unsigned cpu;
+ int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
+ return ret_code == 0 ? cpu : ret_code;
+}
+
+// We need to make sure VDSOSupport::Init() is called before
+// InitGoogle() does any setuid or chroot calls. If VDSOSupport
+// is used in any global constructor, this will happen, since
+// VDSOSupport's constructor calls Init. But if not, we need to
+// ensure it here, with a global constructor of our own. This
+// is an allowed exception to the normal rule against non-trivial
+// global constructors.
+static class VDSOInitHelper {
+ public:
+ VDSOInitHelper() { VDSOSupport::Init(); }
+} vdso_init_helper;
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_HAVE_VDSO_SUPPORT
diff --git a/absl/debugging/internal/vdso_support.h b/absl/debugging/internal/vdso_support.h
new file mode 100644
index 0000000..9895b48
--- /dev/null
+++ b/absl/debugging/internal/vdso_support.h
@@ -0,0 +1,156 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
+// executable code, which looks like a shared library, but doesn't
+// necessarily exist anywhere on disk, and which gets mmap()ed into
+// every process by kernels which support VDSO, such as 2.6.x for 32-bit
+// executables, and 2.6.24 and above for 64-bit executables.
+//
+// More details could be found here:
+// http://www.trilithium.com/johan/2005/08/linux-gate/
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+//
+// Example usage:
+// VDSOSupport vdso;
+// VDSOSupport::SymbolInfo info;
+//  typedef long (*FN)(unsigned *, void *, void *);
+// FN fn = nullptr;
+// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+// fn = reinterpret_cast<FN>(info.address);
+// }
+
+#ifndef ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+#define ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+#error ABSL_HAVE_VDSO_SUPPORT cannot be directly set
+#else
+#define ABSL_HAVE_VDSO_SUPPORT 1
+#endif
+
+namespace absl {
+namespace debugging_internal {
+
+// NOTE: this class may be used from within tcmalloc, and can not
+// use any memory allocation routines.
+class VDSOSupport {
+ public:
+ VDSOSupport();
+
+ typedef ElfMemImage::SymbolInfo SymbolInfo;
+ typedef ElfMemImage::SymbolIterator SymbolIterator;
+
+ // On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE
+ // depending on how the kernel is built. The kernel is normally built with
+ // STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a
+ // compile-time constant.
+#ifdef __powerpc64__
+ enum { kVDSOSymbolType = STT_NOTYPE };
+#else
+ enum { kVDSOSymbolType = STT_FUNC };
+#endif
+
+ // Answers whether we have a vdso at all.
+ bool IsPresent() const { return image_.IsPresent(); }
+
+  // Allows iteration over all VDSO symbols.
+ SymbolIterator begin() const { return image_.begin(); }
+ SymbolIterator end() const { return image_.end(); }
+
+ // Look up versioned dynamic symbol in the kernel VDSO.
+ // Returns false if VDSO is not present, or doesn't contain given
+ // symbol/version/type combination.
+ // If info_out != nullptr, additional details are filled in.
+ bool LookupSymbol(const char *name, const char *version,
+ int symbol_type, SymbolInfo *info_out) const;
+
+ // Find info about symbol (if any) which overlaps given address.
+ // Returns true if symbol was found; false if VDSO isn't present
+ // or doesn't have a symbol overlapping given address.
+ // If info_out != nullptr, additional details are filled in.
+ bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+ // Used only for testing. Replace real VDSO base with a mock.
+ // Returns previous value of vdso_base_. After you are done testing,
+ // you are expected to call SetBase() with previous value, in order to
+ // reset state to the way it was.
+ const void *SetBase(const void *s);
+
+ // Computes vdso_base_ and returns it. Should be called as early as
+ // possible; before any thread creation, chroot or setuid.
+ static const void *Init();
+
+ private:
+ // image_ represents VDSO ELF image in memory.
+ // image_.ehdr_ == nullptr implies there is no VDSO.
+ ElfMemImage image_;
+
+ // Cached value of auxv AT_SYSINFO_EHDR, computed once.
+ // This is a tri-state:
+ // kInvalidBase => value hasn't been determined yet.
+ // 0 => there is no VDSO.
+ // else => vma of VDSO Elf{32,64}_Ehdr.
+ //
+ // When testing with mock VDSO, low bit is set.
+ // The low bit is always available because vdso_base_ is
+ // page-aligned.
+ static std::atomic<const void *> vdso_base_;
+
+ // NOLINT on 'long' because these routines mimic kernel api.
+ // The 'cache' parameter may be used by some versions of the kernel,
+ // and should be nullptr or point to a static buffer containing at
+ // least two 'long's.
+ static long InitAndGetCPU(unsigned *cpu, void *cache, // NOLINT 'long'.
+ void *unused);
+ static long GetCPUViaSyscall(unsigned *cpu, void *cache, // NOLINT 'long'.
+ void *unused);
+ typedef long (*GetCpuFn)(unsigned *cpu, void *cache, // NOLINT 'long'.
+ void *unused);
+
+ // This function pointer may point to InitAndGetCPU,
+ // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization.
+ ABSL_CONST_INIT static std::atomic<GetCpuFn> getcpu_fn_;
+
+ friend int GetCPU(void); // Needs access to getcpu_fn_.
+
+ VDSOSupport(const VDSOSupport&) = delete;
+ VDSOSupport& operator=(const VDSOSupport&) = delete;
+};
+
+// Same as sched_getcpu() on later glibc versions.
+// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present,
+// otherwise use syscall(SYS_getcpu,...).
+// May return -1 with errno == ENOSYS if the kernel doesn't
+// support SYS_getcpu.
+int GetCPU();
+
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif // ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
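A small usage sketch for the GetCPU() helper declared above (the function and the ABSL_HAVE_VDSO_SUPPORT guard are as declared in this header; the caller is hypothetical): it resolves through __vdso_getcpu when the kernel exports it and otherwise falls back to the getcpu syscall, returning -1 with errno == ENOSYS when neither is available.

#include <cstdio>

#include "absl/debugging/internal/vdso_support.h"

void PrintCurrentCpu() {
#ifdef ABSL_HAVE_VDSO_SUPPORT
  const int cpu = absl::debugging_internal::GetCPU();
  if (cpu >= 0) {
    std::printf("running on cpu %d\n", cpu);
  } else {
    std::printf("getcpu unavailable\n");
  }
#endif
}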
diff --git a/absl/debugging/leak_check.cc b/absl/debugging/leak_check.cc
new file mode 100644
index 0000000..ffe3d1b
--- /dev/null
+++ b/absl/debugging/leak_check.cc
@@ -0,0 +1,49 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Wrappers around lsan_interface functions.
+// When LSan is not linked in, these functions are not available;
+// therefore, Abseil code that depends on these functions is conditioned on the
+// definition of LEAK_SANITIZER.
+#include "absl/debugging/leak_check.h"
+
+#ifndef LEAK_SANITIZER
+
+namespace absl {
+bool HaveLeakSanitizer() { return false; }
+void DoIgnoreLeak(const void*) { }
+void RegisterLivePointers(const void*, size_t) { }
+void UnRegisterLivePointers(const void*, size_t) { }
+LeakCheckDisabler::LeakCheckDisabler() { }
+LeakCheckDisabler::~LeakCheckDisabler() { }
+} // namespace absl
+
+#else
+
+#include <sanitizer/lsan_interface.h>
+
+namespace absl {
+bool HaveLeakSanitizer() { return true; }
+void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); }
+void RegisterLivePointers(const void* ptr, size_t size) {
+ __lsan_register_root_region(ptr, size);
+}
+void UnRegisterLivePointers(const void* ptr, size_t size) {
+ __lsan_unregister_root_region(ptr, size);
+}
+LeakCheckDisabler::LeakCheckDisabler() { __lsan_disable(); }
+LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); }
+} // namespace absl
+
+#endif // LEAK_SANITIZER
diff --git a/absl/debugging/leak_check.h b/absl/debugging/leak_check.h
new file mode 100644
index 0000000..4d489c5
--- /dev/null
+++ b/absl/debugging/leak_check.h
@@ -0,0 +1,109 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: leak_check.h
+// -----------------------------------------------------------------------------
+//
+// This file contains functions that affect leak checking behavior within
+// targets built with the LeakSanitizer (LSan), a memory leak detector that is
+// integrated within the AddressSanitizer (ASan) as an additional component, or
+// which can be used standalone. LSan and ASan are included (or can be provided)
+// as additional components for most compilers such as Clang, gcc and MSVC.
+// Note: this leak checking API is not yet supported in MSVC.
+// Leak checking is enabled by default in all ASan builds.
+//
+// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// -----------------------------------------------------------------------------
+#ifndef ABSL_DEBUGGING_LEAK_CHECK_H_
+#define ABSL_DEBUGGING_LEAK_CHECK_H_
+
+#include <cstddef>
+
+namespace absl {
+
+// HaveLeakSanitizer()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target.
+bool HaveLeakSanitizer();
+
+// DoIgnoreLeak()
+//
+// Implements `IgnoreLeak()` below. This function should usually
+// not be called directly; calling `IgnoreLeak()` is preferred.
+void DoIgnoreLeak(const void* ptr);
+
+// IgnoreLeak()
+//
+// Instruct the leak sanitizer to ignore leak warnings on the object referenced
+// by the passed pointer, as well as all heap objects transitively referenced
+// by it. The passed object pointer can point to either the beginning of the
+// object or anywhere within it.
+//
+// Example:
+//
+// static T* obj = IgnoreLeak(new T(...));
+//
+// If the passed `ptr` does not point to an actively allocated object at the
+// time `IgnoreLeak()` is called, the call is a no-op; if it is actively
+// allocated, the object must not get deallocated later.
+//
+template <typename T>
+T* IgnoreLeak(T* ptr) {
+ DoIgnoreLeak(ptr);
+ return ptr;
+}
+
+// LeakCheckDisabler
+//
+// This helper class indicates that any heap allocations done in the code block
+// covered by the scoped object (which should itself be allocated on the stack)
+// will not be reported as leaks. Leak check disabling applies within the code
+// block and within any function calls nested inside it.
+//
+// Example:
+//
+// void Foo() {
+// LeakCheckDisabler disabler;
+// ... code that allocates objects whose leaks should be ignored ...
+// }
+//
+// REQUIRES: Destructor runs in same thread as constructor
+class LeakCheckDisabler {
+ public:
+ LeakCheckDisabler();
+ LeakCheckDisabler(const LeakCheckDisabler&) = delete;
+ LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete;
+ ~LeakCheckDisabler();
+};
+
+// RegisterLivePointers()
+//
+// Registers `ptr[0,size-1]` as pointers to memory that is still actively being
+// referenced and for which leak checking should be ignored. This function is
+// useful if you store pointers in mapped memory, for memory ranges that are
+// known to be valid but that normal analysis would otherwise flag as leaked.
+void RegisterLivePointers(const void* ptr, size_t size);
+
+// UnRegisterLivePointers()
+//
+// Deregisters the pointers previously marked as active in
+// `RegisterLivePointers()`, enabling leak checking of those pointers.
+void UnRegisterLivePointers(const void* ptr, size_t size);
+
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_LEAK_CHECK_H_
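The header above shows examples for IgnoreLeak() and LeakCheckDisabler; here is a comparable sketch for the RegisterLivePointers()/UnRegisterLivePointers() pair, where the region and its lifetime are hypothetical.

#include <cstddef>

#include "absl/debugging/leak_check.h"

// 'region' is some mapped block that stores pointers to heap objects which
// are still in use but invisible to LSan's normal root scan.
void ProtectMappedRegion(void* region, size_t region_size) {
  absl::RegisterLivePointers(region, region_size);
  // ... objects referenced from the region are not reported as leaks ...
  absl::UnRegisterLivePointers(region, region_size);  // resume normal checking
}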
diff --git a/absl/debugging/leak_check_disable.cc b/absl/debugging/leak_check_disable.cc
new file mode 100644
index 0000000..924d6e3
--- /dev/null
+++ b/absl/debugging/leak_check_disable.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Disable LeakSanitizer when this file is linked in.
+// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
+extern "C" int __lsan_is_turned_off();
+extern "C" int __lsan_is_turned_off() {
+ return 1;
+}
diff --git a/absl/debugging/leak_check_fail_test.cc b/absl/debugging/leak_check_fail_test.cc
new file mode 100644
index 0000000..2887cea
--- /dev/null
+++ b/absl/debugging/leak_check_fail_test.cc
@@ -0,0 +1,41 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstring>  // for strdup()
+#include <memory>
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/leak_check.h"
+
+namespace {
+
+TEST(LeakCheckTest, LeakMemory) {
+ // This test is expected to cause lsan failures on program exit. Therefore the
+ // test will be run only by leak_check_test.sh, which will verify a
+ // failed exit code.
+
+ char* foo = strdup("lsan should complain about this leaked string");
+  ABSL_RAW_LOG(INFO, "Should detect leaked string %s", foo);
+}
+
+TEST(LeakCheckTest, LeakMemoryAfterDisablerScope) {
+ // This test is expected to cause lsan failures on program exit. Therefore the
+ // test will be run only by external_leak_check_test.sh, which will verify a
+ // failed exit code.
+ { absl::LeakCheckDisabler disabler; }
+ char* foo = strdup("lsan should also complain about this leaked string");
+  ABSL_RAW_LOG(INFO, "Re-enabled leak detection. Should detect leaked string %s",
+               foo);
+}
+
+} // namespace
diff --git a/absl/debugging/leak_check_test.cc b/absl/debugging/leak_check_test.cc
new file mode 100644
index 0000000..93a7edd
--- /dev/null
+++ b/absl/debugging/leak_check_test.cc
@@ -0,0 +1,42 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/leak_check.h"
+
+namespace {
+
+TEST(LeakCheckTest, DetectLeakSanitizer) {
+#ifdef ABSL_EXPECT_LEAK_SANITIZER
+ EXPECT_TRUE(absl::HaveLeakSanitizer());
+#else
+ EXPECT_FALSE(absl::HaveLeakSanitizer());
+#endif
+}
+
+TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) {
+ auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string"));
+ ABSL_RAW_LOG(INFO, "Ignoring leaked std::string %s", foo->c_str());
+}
+
+TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
+ absl::LeakCheckDisabler disabler;
+ auto foo = new std::string("some std::string leaked while checks are disabled");
+ ABSL_RAW_LOG(INFO, "Ignoring leaked std::string %s", foo->c_str());
+}
+
+} // namespace
diff --git a/absl/debugging/stacktrace.cc b/absl/debugging/stacktrace.cc
new file mode 100644
index 0000000..9de8782
--- /dev/null
+++ b/absl/debugging/stacktrace.cc
@@ -0,0 +1,138 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Produce stack trace.
+//
+// There are three different ways we can try to get the stack trace:
+//
+// 1) Our hand-coded stack-unwinder. This depends on a certain stack
+// layout, which is used by gcc (and those systems using a
+// gcc-compatible ABI) on x86 systems, at least since gcc 2.95.
+// It uses the frame pointer to do its work.
+//
+// 2) The libunwind library. This is still in development, and as a
+// separate library adds a new dependency, but doesn't need a frame
+// pointer. It also doesn't call malloc.
+//
+// 3) The gdb unwinder -- also the one used by the c++ exception code.
+// It's obviously well-tested, but has a fatal flaw: it can call
+// malloc() from the unwinder. This is a problem because we're
+// trying to use the unwinder to instrument malloc().
+//
+// Note: if you add a new implementation here, make sure it works
+// correctly when absl::GetStackTrace() is called with max_depth == 0.
+// Some code may do that.
+
+#include "absl/debugging/stacktrace.h"
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/port.h"
+#include "absl/debugging/internal/stacktrace_config.h"
+
+#if defined(ABSL_STACKTRACE_INL_HEADER)
+#include ABSL_STACKTRACE_INL_HEADER
+#else
+# error Cannot calculate stack trace: you will need to write an implementation for your environment
+
+# include "absl/debugging/internal/stacktrace_aarch64-inl.inc"
+# include "absl/debugging/internal/stacktrace_arm-inl.inc"
+# include "absl/debugging/internal/stacktrace_generic-inl.inc"
+# include "absl/debugging/internal/stacktrace_powerpc-inl.inc"
+# include "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+# include "absl/debugging/internal/stacktrace_win32-inl.inc"
+# include "absl/debugging/internal/stacktrace_x86-inl.inc"
+#endif
+
+namespace absl {
+namespace {
+
+typedef int (*Unwinder)(void**, int*, int, int, const void*, int*);
+std::atomic<Unwinder> custom;
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, int* sizes,
+ int max_depth, int skip_count,
+ const void* uc,
+ int* min_dropped_frames) {
+ Unwinder f = &UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>;
+ Unwinder g = custom.load(std::memory_order_acquire);
+ if (g != nullptr) f = g;
+
+ // Add 1 to skip count for the unwinder function itself
+ int size = (*f)(result, sizes, max_depth, skip_count + 1, uc,
+ min_dropped_frames);
+ // To disable tail call to (*f)(...)
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+ return size;
+}
+
+} // anonymous namespace
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackFrames(
+ void** result, int* sizes, int max_depth, int skip_count) {
+ return Unwind<true, false>(result, sizes, max_depth, skip_count, nullptr,
+ nullptr);
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
+GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames) {
+ return Unwind<true, true>(result, sizes, max_depth, skip_count, uc,
+ min_dropped_frames);
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(
+ void** result, int max_depth, int skip_count) {
+ return Unwind<false, false>(result, nullptr, max_depth, skip_count, nullptr,
+ nullptr);
+}
+
+ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
+GetStackTraceWithContext(void** result, int max_depth, int skip_count,
+ const void* uc, int* min_dropped_frames) {
+ return Unwind<false, true>(result, nullptr, max_depth, skip_count, uc,
+ min_dropped_frames);
+}
+
+void SetStackUnwinder(Unwinder w) {
+ custom.store(w, std::memory_order_release);
+}
+
+int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
+ const void* uc, int* min_dropped_frames) {
+ skip++; // For this function
+ Unwinder f = nullptr;
+ if (sizes == nullptr) {
+ if (uc == nullptr) {
+ f = &UnwindImpl<false, false>;
+ } else {
+ f = &UnwindImpl<false, true>;
+ }
+ } else {
+ if (uc == nullptr) {
+ f = &UnwindImpl<true, false>;
+ } else {
+ f = &UnwindImpl<true, true>;
+ }
+ }
+ volatile int x = 0;
+ int n = (*f)(pcs, sizes, depth, skip, uc, min_dropped_frames);
+ x = 1; (void) x; // To disable tail call to (*f)(...)
+ return n;
+}
+
+} // namespace absl
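As a sketch of the custom-unwinder hook wired up above (assuming, as this file does, that SetStackUnwinder() and DefaultStackUnwinder() are declared in stacktrace.h; the wrapper name is illustrative), a replacement unwinder can simply forward to the default one:

#include "absl/debugging/stacktrace.h"

// Forwards to the default unwinder, skipping one extra frame for itself.
int ForwardingUnwinder(void** pcs, int* sizes, int max_depth, int skip_count,
                       const void* uc, int* min_dropped_frames) {
  return absl::DefaultStackUnwinder(pcs, sizes, max_depth, skip_count + 1, uc,
                                    min_dropped_frames);
}

// Registration, typically done once during startup:
//   absl::SetStackUnwinder(&ForwardingUnwinder);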
diff --git a/absl/debugging/stacktrace.h b/absl/debugging/stacktrace.h
new file mode 100644
index 0000000..3fc1c03
--- /dev/null
+++ b/absl/debugging/stacktrace.h
@@ -0,0 +1,225 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: stacktrace.h
+// -----------------------------------------------------------------------------
+//
+// This file contains routines to extract the current stack trace and associated
+// stack frames. These functions are thread-safe and async-signal-safe.
+//
+// Note that stack trace functionality is platform dependent and requires
+// additional support from the compiler/build system in most cases. (That is,
+// this functionality generally only works on platforms/builds that have been
+// specifically configured to support it.)
+//
+// Note: stack traces in Abseil that do not utilize a symbolizer will result in
+// frames consisting of function addresses rather than human-readable function
+// names. (See symbolize.h for information on symbolizing these values.)
+
+#ifndef ABSL_DEBUGGING_STACKTRACE_H_
+#define ABSL_DEBUGGING_STACKTRACE_H_
+
+namespace absl {
+
+// GetStackFrames()
+//
+// Records program counter values for up to `max_depth` frames, skipping the
+// most recent `skip_count` stack frames, and stores their corresponding values
+// and sizes in the `result` and `sizes` buffers. (Note that the frame
+// generated for the `absl::GetStackFrames()` routine itself is also skipped.)
+//
+// Example:
+//
+// main() { foo(); }
+// foo() { bar(); }
+// bar() {
+// void* result[10];
+// int sizes[10];
+// int depth = absl::GetStackFrames(result, sizes, 10, 1);
+// }
+//
+// The current stack frame would consist of three function calls: `bar()`,
+// `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets
+// `skip_count` to `1`, it will skip the frame for `bar()`, the most recently
+// invoked function call. It will therefore return two program counters and will
+// produce values that map to the following function calls:
+//
+// result[0] foo()
+// result[1] main()
+//
+// (Note: in practice, a few more entries after `main()` may be added to account
+// for startup processes.)
+//
+// Corresponding stack frame sizes will also be recorded:
+//
+// sizes[0] 16
+// sizes[1] 16
+//
+// (Stack frame sizes of `16` above are just for illustration purposes.)
+//
+// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
+// be identified.
+//
+// This routine may return fewer stack frame entries than are
+// available. Also note that `result` and `sizes` must both be non-null.
+extern int GetStackFrames(void** result, int* sizes, int max_depth,
+ int skip_count);
+
+// GetStackFramesWithContext()
+//
+// Records program counter values, typically obtained from within a signal
+// handler, for up to `max_depth` frames, skipping the most recent `skip_count`
+// stack frames, and stores their corresponding values and sizes in the
+// `result` and `sizes` buffers. (Note that the frame generated for the
+// `absl::GetStackFramesWithContext()` routine itself is also skipped.)
+//
+// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
+// passed to a signal handler registered via the `sa_sigaction` field of a
+// `sigaction` struct. (See
+// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
+// help a stack unwinder to provide a better stack trace under certain
+// conditions. `uc` may safely be null.
+//
+// The `min_dropped_frames` output parameter, if non-null, points to a location
+// used to record the number of stack frames that were dropped due to buffer
+// limitations or other reasons. (This value will be set to `0` if no frames
+// were dropped.) The total number of stack frames is guaranteed to be
+// >= skip_count + max_depth + *min_dropped_frames.
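+//
+// Example: a minimal sketch of calling this routine from a `sa_sigaction`-style
+// signal handler. The handler name and the buffer sizes are illustrative only,
+// not part of the API.
+//
+//   // Registered via sigaction() with SA_SIGINFO set in `sa_flags`, so the
+//   // third argument is a `ucontext_t*` passed as `void*`.
+//   void MySignalHandler(int signo, siginfo_t* info, void* ucontext) {
+//     void* result[32];
+//     int sizes[32];
+//     int min_dropped_frames = 0;
+//     int depth = absl::GetStackFramesWithContext(
+//         result, sizes, 32, 0, ucontext, &min_dropped_frames);
+//     // ... record or print the `depth` recovered frames ...
+//   }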
+extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames);
+
+// GetStackTrace()
+//
+// Records program counter values for up to `max_depth` frames, skipping the
+// most recent `skip_count` stack frames, and stores their corresponding values
+// in `result`. Note that this function is similar to `absl::GetStackFrames()`
+// except that it returns the stack trace only, and not stack frame sizes.
+//
+// Example:
+//
+// main() { foo(); }
+// foo() { bar(); }
+// bar() {
+// void* result[10];
+// int depth = absl::GetStackTrace(result, 10, 1);
+// }
+//
+// This produces:
+//
+// result[0] foo
+// result[1] main
+// .... ...
+//
+// `result` must not be null.
+extern int GetStackTrace(void** result, int max_depth, int skip_count);
+
+// GetStackTraceWithContext()
+//
+// Records program counter values, typically obtained from within a signal
+// handler, for up to `max_depth` frames, skipping the most recent `skip_count`
+// stack frames, and stores their corresponding values in the `result` buffer.
+// (Note that the frame generated for the `absl::GetStackTraceWithContext()`
+// routine itself is also skipped.)
+//
+// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
+// passed to a signal handler registered via the `sa_sigaction` field of a
+// `sigaction` struct. (See
+// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
+// help a stack unwinder to provide a better stack trace under certain
+// conditions. `uc` may safely be null.
+//
+// The `min_dropped_frames` output parameter, if non-null, points to a location
+// used to record the number of stack frames that were dropped due to buffer
+// limitations or other reasons. (This value will be set to `0` if no frames
+// were dropped.) The total number of stack frames is guaranteed to be
+// >= skip_count + max_depth + *min_dropped_frames.
+extern int GetStackTraceWithContext(void** result, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames);
+
+// SetStackUnwinder()
+//
+// Provides a custom function for unwinding stack frames that will be used in
+// place of the default stack unwinder when invoking the static
+// GetStack{Frames,Trace}{,WithContext}() functions above.
+//
+// The arguments passed to the unwinder function will match the
+// arguments passed to `absl::GetStackFramesWithContext()` except that sizes
+// will be non-null iff the caller is interested in frame sizes.
+//
+// If unwinder is set to null, we revert to the default stack-tracing behavior.
+//
+// *****************************************************************************
+// WARNING
+// *****************************************************************************
+//
+// absl::SetStackUnwinder is not suitable for general purpose use. It is
+// provided for custom runtimes.
+// Some things to watch out for when calling `absl::SetStackUnwinder()`:
+//
+// (a) The unwinder may be called from within signal handlers and
+// therefore must be async-signal-safe.
+//
+// (b) Even after a custom stack unwinder has been unregistered, other
+// threads may still be in the process of using that unwinder.
+// Therefore do not clean up any state that may be needed by an old
+// unwinder.
+// *****************************************************************************
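+//
+// Example: an illustrative sketch only; `MyUnwinder` is a hypothetical name.
+// A real custom runtime would walk its own stack format here; this trivial
+// version just defers to the default unwinder.
+//
+//   int MyUnwinder(void** pcs, int* sizes, int max_depth, int skip_count,
+//                  const void* uc, int* min_dropped_frames) {
+//     return absl::DefaultStackUnwinder(pcs, sizes, max_depth, skip_count,
+//                                       uc, min_dropped_frames);
+//   }
+//
+//   absl::SetStackUnwinder(&MyUnwinder);
+//   ...
+//   absl::SetStackUnwinder(nullptr);  // Revert to the default behavior.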
+extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes,
+ int max_depth, int skip_count,
+ const void* uc,
+ int* min_dropped_frames));
+
+// DefaultStackUnwinder()
+//
+// Records program counter values of up to `max_depth` frames, skipping the most
+// recent `skip_count` stack frames, and stores their corresponding values in
+// `pcs`. (Note that the frame generated for this call itself is also skipped.)
+// This function acts as a generic stack-unwinder; prefer usage of the more
+// specific `GetStack{Trace,Frames}{,WithContext}()` functions above.
+//
+// If you have set your own stack unwinder (with the `SetStackUnwinder()`
+// function above), you can still get the default stack unwinder by calling
+// `DefaultStackUnwinder()`, which will ignore any previously set stack unwinder
+// and use the default one instead.
+//
+// Because this function is generic, only `pcs` is guaranteed to be non-null
+// upon return. It is legal for `sizes`, `uc`, and `min_dropped_frames` to all
+// be null when called.
+//
+// The semantics are the same as the corresponding `GetStack*()` function in the
+// case where `absl::SetStackUnwinder()` was never called. Equivalents are:
+//
+//                 |          null sizes          |      non-nullptr sizes
+//    =============|==============================|==============================
+//    null uc      | GetStackTrace()              | GetStackFrames()
+//    non-null uc  | GetStackTraceWithContext()   | GetStackFramesWithContext()
+//    =============|==============================|==============================
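+//
+// Example (an illustrative sketch only): with null `sizes` and null `uc`, a
+// call behaves like `absl::GetStackTrace()` regardless of whether a custom
+// unwinder has been registered via `absl::SetStackUnwinder()`.
+//
+//   void* pcs[10];
+//   int depth = absl::DefaultStackUnwinder(pcs, nullptr, 10, 0, nullptr,
+//                                          nullptr);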
+extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth,
+ int skip_count, const void* uc,
+ int* min_dropped_frames);
+
+namespace debugging_internal {
+// Returns true for platforms which are expected to have functioning stack trace
+// implementations. Intended to be used for tests which want to exclude
+// verification of logic known to be broken because stack traces are not
+// working.
+extern bool StackTraceWorksForTest();
+} // namespace debugging_internal
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_STACKTRACE_H_
diff --git a/absl/debugging/symbolize.cc b/absl/debugging/symbolize.cc
new file mode 100644
index 0000000..24e3a7f
--- /dev/null
+++ b/absl/debugging/symbolize.cc
@@ -0,0 +1,28 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/symbolize.h"
+
+#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE)
+#include "absl/debugging/symbolize_elf.inc"
+#elif defined(_WIN32) && defined(_DEBUG)
+// The Windows Symbolizer only works in debug mode. Note that _DEBUG
+// is the macro that defines whether or not MS C-Runtime debug info is
+// available. Note that the PDB files containing the debug info must
+// also be available to the program at runtime for the symbolizer to
+// work.
+#include "absl/debugging/symbolize_win32.inc"
+#else
+#include "absl/debugging/symbolize_unimplemented.inc"
+#endif
diff --git a/absl/debugging/symbolize.h b/absl/debugging/symbolize.h
new file mode 100644
index 0000000..a73dbd9
--- /dev/null
+++ b/absl/debugging/symbolize.h
@@ -0,0 +1,97 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: symbolize.h
+// -----------------------------------------------------------------------------
+//
+// This file configures the Abseil symbolizer for use in converting instruction
+// pointer addresses (program counters) into human-readable names (function
+// calls, etc.) within Abseil code.
+//
+// The symbolizer may be invoked from several sources:
+//
+// * Implicitly, through the installation of an Abseil failure signal handler.
+// (See failure_signal_handler.h for more information.)
+// * By calling `Symbolize()` directly on a program counter you obtain through
+// `absl::GetStackTrace()` or `absl::GetStackFrames()`. (See stacktrace.h
+//   for more information.)
+// * By calling `Symbolize()` directly on a program counter you obtain through
+// other means (which would be platform-dependent).
+//
+// In all of the above cases, the symbolizer must first be initialized before
+// any program counter values can be symbolized. If you are installing a failure
+// signal handler, initialize the symbolizer before you do so.
+//
+// Example:
+//
+// int main(int argc, char** argv) {
+// // Initialize the Symbolizer before installing the failure signal handler
+// absl::InitializeSymbolizer(argv[0]);
+//
+// // Now you may install the failure signal handler
+// absl::FailureSignalHandlerOptions options;
+// absl::InstallFailureSignalHandler(options);
+//
+// // Start running your main program
+// ...
+// return 0;
+// }
+//
+#ifndef ABSL_DEBUGGING_SYMBOLIZE_H_
+#define ABSL_DEBUGGING_SYMBOLIZE_H_
+
+#include "absl/debugging/internal/symbolize.h"
+
+namespace absl {
+
+// InitializeSymbolizer()
+//
+// Initializes the program counter symbolizer, given the path of the program
+// (typically obtained through `main()`'s `argv[0]`). The Abseil symbolizer
+// allows you to translate program counters (instruction pointer values) into
+// their human-readable names within output such as stack traces.
+//
+// Example:
+//
+// int main(int argc, char *argv[]) {
+// absl::InitializeSymbolizer(argv[0]);
+// // Now you can use the symbolizer
+// }
+void InitializeSymbolizer(const char* argv0);
+
+// Symbolize()
+//
+// Symbolizes a program counter (instruction pointer value) `pc` and, on
+// success, writes the symbol name to `out`. The symbol name is demangled if
+// possible. (Demangling is supported for symbols generated by GCC 3.x or
+// newer.) Note that the symbolized name may be truncated and will be
+// NUL-terminated. Returns `false` on failure.
+//
+// Example:
+//
+// // Print a program counter and its symbol name.
+// static void DumpPCAndSymbol(void *pc) {
+// char tmp[1024];
+// const char *symbol = "(unknown)";
+// if (absl::Symbolize(pc, tmp, sizeof(tmp))) {
+// symbol = tmp;
+// }
+//     absl::PrintF("%p  %s\n", pc, symbol);
+// }
+bool Symbolize(const void *pc, char *out, int out_size);
+
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_SYMBOLIZE_H_
diff --git a/absl/debugging/symbolize_elf.inc b/absl/debugging/symbolize_elf.inc
new file mode 100644
index 0000000..14f0c97
--- /dev/null
+++ b/absl/debugging/symbolize_elf.inc
@@ -0,0 +1,1478 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This library provides a Symbolize() function that symbolizes program
+// counters to their corresponding symbol names on Linux platforms.
+// This library has a minimal implementation of an ELF symbol table
+// reader (i.e. it doesn't depend on libelf, etc.).
+//
+// The algorithm used in Symbolize() is as follows.
+//
+// 1. Go through a list of maps in /proc/self/maps and find the map
+// containing the program counter.
+//
+// 2. Open the mapped file and find a regular symbol table inside.
+// Iterate over symbols in the symbol table and look for the symbol
+// containing the program counter. If such a symbol is found,
+// obtain the symbol name, and demangle the symbol if possible.
+// If the symbol isn't found in the regular symbol table (binary is
+// stripped), try the same thing with a dynamic symbol table.
+//
+// Note that Symbolize() was originally implemented to be used in
+// signal handlers; hence it doesn't use malloc() and other unsafe
+// operations. It should be both thread-safe and async-signal-safe.
+//
+// Implementation note:
+//
+// We don't use the heap, only the stack. We want to reduce the
+// stack consumption so that the symbolizer can run on small stacks.
+//
+// Here are some numbers collected with GCC 4.1.0 on x86:
+// - sizeof(Elf32_Sym) = 16
+// - sizeof(Elf32_Shdr) = 40
+// - sizeof(Elf64_Sym) = 24
+// - sizeof(Elf64_Shdr) = 64
+//
+// This implementation is intended to be async-signal-safe but uses some
+// functions which are not guaranteed to be so, such as memchr() and
+// memmove(). We assume they are async-signal-safe.
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <link.h> // For ElfW() macro.
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cinttypes>
+#include <climits>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "absl/base/casts.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_alloc.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/port.h"
+#include "absl/debugging/internal/demangle.h"
+#include "absl/debugging/internal/vdso_support.h"
+
+namespace absl {
+
+// Value of argv[0]. Used by MaybeInitializeObjFile().
+static char *argv0_value = nullptr;
+
+void InitializeSymbolizer(const char *argv0) {
+ if (argv0_value != nullptr) {
+ free(argv0_value);
+ argv0_value = nullptr;
+ }
+ if (argv0 != nullptr && argv0[0] != '\0') {
+ argv0_value = strdup(argv0);
+ }
+}
+
+namespace debugging_internal {
+namespace {
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn) \
+ do { \
+ } while ((fn) < 0 && errno == EINTR)
+
+// On Linux, ELF_ST_* are defined in <linux/elf.h>. To make this portable
+// we define our own ELF_ST_BIND and ELF_ST_TYPE if not available.
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND(info) (((unsigned char)(info)) >> 4)
+#endif
+
+#ifndef ELF_ST_TYPE
+#define ELF_ST_TYPE(info) (((unsigned char)(info)) & 0xF)
+#endif
+
+// Some platforms use a special .opd section to store function pointers.
+const char kOpdSectionName[] = ".opd";
+
+#if (defined(__powerpc__) && !(_CALL_ELF > 1)) || defined(__ia64)
+// Use opd section for function descriptors on these platforms, the function
+// address is the first word of the descriptor.
+enum { kPlatformUsesOPDSections = 1 };
+#else // not PPC or IA64
+enum { kPlatformUsesOPDSections = 0 };
+#endif
+
+// This works for PowerPC & IA64 only. A function descriptor consists of two
+// pointers, and the first one is the function's entry point.
+const size_t kFunctionDescriptorSize = sizeof(void *) * 2;
+
+const int kMaxDecorators = 10; // Seems like a reasonable upper limit.
+
+struct InstalledSymbolDecorator {
+ SymbolDecorator fn;
+ void *arg;
+ int ticket;
+};
+
+int g_num_decorators;
+InstalledSymbolDecorator g_decorators[kMaxDecorators];
+
+struct FileMappingHint {
+ const void *start;
+ const void *end;
+ uint64_t offset;
+ const char *filename;
+};
+
+// Protects g_decorators.
+// We are using a SpinLock and not a Mutex here, because we may be called
+// from inside Mutex::Lock itself, which prohibits recursive calls.
+// This happens in e.g. base/stacktrace_syscall_unittest.
+// Moreover, we only use TryLock(): if the decorator list
+// is being modified (is busy), we skip all decorators, and possibly
+// lose some info. Sorry, that's the best we could do.
+base_internal::SpinLock g_decorators_mu(base_internal::kLinkerInitialized);
+
+const int kMaxFileMappingHints = 8;
+int g_num_file_mapping_hints;
+FileMappingHint g_file_mapping_hints[kMaxFileMappingHints];
+// Protects g_file_mapping_hints.
+base_internal::SpinLock g_file_mapping_mu(base_internal::kLinkerInitialized);
+
+// Async-signal-safe function to zero a buffer.
+// memset() is not guaranteed to be async-signal-safe.
+static void SafeMemZero(void* p, size_t size) {
+ unsigned char *c = static_cast<unsigned char *>(p);
+ while (size--) {
+ *c++ = 0;
+ }
+}
+
+struct ObjFile {
+ ObjFile()
+ : filename(nullptr),
+ start_addr(nullptr),
+ end_addr(nullptr),
+ offset(0),
+ fd(-1),
+ elf_type(-1) {
+ SafeMemZero(&elf_header, sizeof(elf_header));
+ }
+
+ char *filename;
+ const void *start_addr;
+ const void *end_addr;
+ uint64_t offset;
+
+ // The following fields are initialized on the first access to the
+ // object file.
+ int fd;
+ int elf_type;
+ ElfW(Ehdr) elf_header;
+};
+
+// Build a 4-way associative cache for symbols. Within each cache line, symbols
+// are replaced in LRU order.
+enum {
+ ASSOCIATIVITY = 4,
+};
+struct SymbolCacheLine {
+ const void *pc[ASSOCIATIVITY];
+ char *name[ASSOCIATIVITY];
+
+  // age[i] is incremented each time the cache line is accessed; it is reset
+  // to zero when the i'th entry is read or replaced.
+ uint32_t age[ASSOCIATIVITY];
+};
+
+// ---------------------------------------------------------------
+// An async-signal-safe arena for LowLevelAlloc
+static std::atomic<base_internal::LowLevelAlloc::Arena *> g_sig_safe_arena;
+
+static base_internal::LowLevelAlloc::Arena *SigSafeArena() {
+ return g_sig_safe_arena.load(std::memory_order_acquire);
+}
+
+static void InitSigSafeArena() {
+ if (SigSafeArena() == nullptr) {
+ base_internal::LowLevelAlloc::Arena *new_arena =
+ base_internal::LowLevelAlloc::NewArena(
+ base_internal::LowLevelAlloc::kAsyncSignalSafe);
+ base_internal::LowLevelAlloc::Arena *old_value = nullptr;
+ if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ // We lost a race to allocate an arena; deallocate.
+ base_internal::LowLevelAlloc::DeleteArena(new_arena);
+ }
+ }
+}
+
+// ---------------------------------------------------------------
+// An AddrMap is a vector of ObjFile, using SigSafeArena() for allocation.
+
+class AddrMap {
+ public:
+ AddrMap() : size_(0), allocated_(0), obj_(nullptr) {}
+ ~AddrMap() { base_internal::LowLevelAlloc::Free(obj_); }
+ int Size() const { return size_; }
+ ObjFile *At(int i) { return &obj_[i]; }
+ ObjFile *Add();
+ void Clear();
+
+ private:
+ int size_; // count of valid elements (<= allocated_)
+ int allocated_; // count of allocated elements
+ ObjFile *obj_; // array of allocated_ elements
+ AddrMap(const AddrMap &) = delete;
+ AddrMap &operator=(const AddrMap &) = delete;
+};
+
+void AddrMap::Clear() {
+ for (int i = 0; i != size_; i++) {
+ At(i)->~ObjFile();
+ }
+ size_ = 0;
+}
+
+ObjFile *AddrMap::Add() {
+ if (size_ == allocated_) {
+ int new_allocated = allocated_ * 2 + 50;
+ ObjFile *new_obj_ =
+ static_cast<ObjFile *>(base_internal::LowLevelAlloc::AllocWithArena(
+ new_allocated * sizeof(*new_obj_), SigSafeArena()));
+ if (obj_) {
+ memcpy(new_obj_, obj_, allocated_ * sizeof(*new_obj_));
+ base_internal::LowLevelAlloc::Free(obj_);
+ }
+ obj_ = new_obj_;
+ allocated_ = new_allocated;
+ }
+ return new (&obj_[size_++]) ObjFile;
+}
+
+// ---------------------------------------------------------------
+
+enum FindSymbolResult { SYMBOL_NOT_FOUND = 1, SYMBOL_TRUNCATED, SYMBOL_FOUND };
+
+class Symbolizer {
+ public:
+ Symbolizer();
+ ~Symbolizer();
+ const char *GetSymbol(const void *const pc);
+
+ private:
+ char *CopyString(const char *s) {
+ int len = strlen(s);
+ char *dst = static_cast<char *>(
+ base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena()));
+ ABSL_RAW_CHECK(dst != nullptr, "out of memory");
+ memcpy(dst, s, len + 1);
+ return dst;
+ }
+ ObjFile *FindObjFile(const void *const start,
+ size_t size) ABSL_ATTRIBUTE_NOINLINE;
+ static bool RegisterObjFile(const char *filename,
+ const void *const start_addr,
+ const void *const end_addr, uint64_t offset,
+ void *arg);
+ SymbolCacheLine *GetCacheLine(const void *const pc);
+ const char *FindSymbolInCache(const void *const pc);
+ const char *InsertSymbolInCache(const void *const pc, const char *name);
+ void AgeSymbols(SymbolCacheLine *line);
+ void ClearAddrMap();
+ FindSymbolResult GetSymbolFromObjectFile(const ObjFile &obj,
+ const void *const pc,
+ const ptrdiff_t relocation,
+ char *out, int out_size,
+ char *tmp_buf, int tmp_buf_size);
+
+ enum {
+ SYMBOL_BUF_SIZE = 3072,
+ TMP_BUF_SIZE = 1024,
+ SYMBOL_CACHE_LINES = 128,
+ };
+
+ AddrMap addr_map_;
+
+ bool ok_;
+ bool addr_map_read_;
+
+ char symbol_buf_[SYMBOL_BUF_SIZE];
+
+ // tmp_buf_ will be used to store arrays of ElfW(Shdr) and ElfW(Sym)
+ // so we ensure that tmp_buf_ is properly aligned to store either.
+ alignas(16) char tmp_buf_[TMP_BUF_SIZE];
+ static_assert(alignof(ElfW(Shdr)) <= 16,
+ "alignment of tmp buf too small for Shdr");
+ static_assert(alignof(ElfW(Sym)) <= 16,
+ "alignment of tmp buf too small for Sym");
+
+ SymbolCacheLine symbol_cache_[SYMBOL_CACHE_LINES];
+};
+
+static std::atomic<Symbolizer *> g_cached_symbolizer;
+
+} // namespace
+
+static int SymbolizerSize() {
+#if defined(__wasm__) || defined(__asmjs__)
+ int pagesize = getpagesize();
+#else
+ int pagesize = sysconf(_SC_PAGESIZE);
+#endif
+ return ((sizeof(Symbolizer) - 1) / pagesize + 1) * pagesize;
+}
+
+// Return (and reset to null) g_cached_symbolizer if it is not null.
+// Otherwise return a new symbolizer.
+static Symbolizer *AllocateSymbolizer() {
+ InitSigSafeArena();
+ Symbolizer *symbolizer =
+ g_cached_symbolizer.exchange(nullptr, std::memory_order_acquire);
+ if (symbolizer != nullptr) {
+ return symbolizer;
+ }
+ return new (base_internal::LowLevelAlloc::AllocWithArena(
+ SymbolizerSize(), SigSafeArena())) Symbolizer();
+}
+
+// Set g_cached_symbolizer to s if it is null, otherwise
+// delete s.
+static void FreeSymbolizer(Symbolizer *s) {
+ Symbolizer *old_cached_symbolizer = nullptr;
+ if (!g_cached_symbolizer.compare_exchange_strong(old_cached_symbolizer, s,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ s->~Symbolizer();
+ base_internal::LowLevelAlloc::Free(s);
+ }
+}
+
+Symbolizer::Symbolizer() : ok_(true), addr_map_read_(false) {
+ for (SymbolCacheLine &symbol_cache_line : symbol_cache_) {
+ for (size_t j = 0; j < ABSL_ARRAYSIZE(symbol_cache_line.name); ++j) {
+ symbol_cache_line.pc[j] = nullptr;
+ symbol_cache_line.name[j] = nullptr;
+ symbol_cache_line.age[j] = 0;
+ }
+ }
+}
+
+Symbolizer::~Symbolizer() {
+ for (SymbolCacheLine &symbol_cache_line : symbol_cache_) {
+ for (char *s : symbol_cache_line.name) {
+ base_internal::LowLevelAlloc::Free(s);
+ }
+ }
+ ClearAddrMap();
+}
+
+// We don't use assert() since it's not guaranteed to be
+// async-signal-safe. Instead we define a minimal assertion
+// macro. So far, we don't need pretty printing for __FILE__, etc.
+#define SAFE_ASSERT(expr) ((expr) ? static_cast<void>(0) : abort())
+
+// Read up to "count" bytes from file descriptor "fd" into the buffer
+// starting at "buf" while handling short reads and EINTR. On
+// success, return the number of bytes read. Otherwise, return -1.
+static ssize_t ReadPersistent(int fd, void *buf, size_t count) {
+ SAFE_ASSERT(fd >= 0);
+ SAFE_ASSERT(count <= SSIZE_MAX);
+ char *buf0 = reinterpret_cast<char *>(buf);
+ size_t num_bytes = 0;
+ while (num_bytes < count) {
+ ssize_t len;
+ NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+ if (len < 0) { // There was an error other than EINTR.
+ ABSL_RAW_LOG(WARNING, "read failed: errno=%d", errno);
+ return -1;
+ }
+ if (len == 0) { // Reached EOF.
+ break;
+ }
+ num_bytes += len;
+ }
+ SAFE_ASSERT(num_bytes <= count);
+ return static_cast<ssize_t>(num_bytes);
+}
+
+// Read up to "count" bytes from "offset" in the file pointed to by file
+// descriptor "fd" into the buffer starting at "buf". On success,
+// return the number of bytes read. Otherwise, return -1.
+static ssize_t ReadFromOffset(const int fd, void *buf, const size_t count,
+ const off_t offset) {
+ off_t off = lseek(fd, offset, SEEK_SET);
+ if (off == (off_t)-1) {
+ ABSL_RAW_LOG(WARNING, "lseek(%d, %ju, SEEK_SET) failed: errno=%d", fd,
+ static_cast<uintmax_t>(offset), errno);
+ return -1;
+ }
+ return ReadPersistent(fd, buf, count);
+}
+
+// Try reading exactly "count" bytes starting at offset "offset" in the file
+// pointed to by "fd" into the buffer starting at "buf" while handling
+// short reads and EINTR. On success, return true. Otherwise, return
+// false.
+static bool ReadFromOffsetExact(const int fd, void *buf, const size_t count,
+ const off_t offset) {
+ ssize_t len = ReadFromOffset(fd, buf, count, offset);
+ return len >= 0 && static_cast<size_t>(len) == count;
+}
+
+// Returns elf_header.e_type if the file pointed to by fd is an ELF binary.
+static int FileGetElfType(const int fd) {
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return -1;
+ }
+ if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
+ return -1;
+ }
+ return elf_header.e_type;
+}
+
+// Read the section headers in the given ELF binary, and if a section
+// of the specified type is found, set the output to this section header
+// and return true. Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ABSL_ATTRIBUTE_NOINLINE bool GetSectionHeaderByType(
+ const int fd, ElfW(Half) sh_num, const off_t sh_offset, ElfW(Word) type,
+ ElfW(Shdr) * out, char *tmp_buf, int tmp_buf_size) {
+ ElfW(Shdr) *buf = reinterpret_cast<ElfW(Shdr) *>(tmp_buf);
+ const int buf_entries = tmp_buf_size / sizeof(buf[0]);
+ const int buf_bytes = buf_entries * sizeof(buf[0]);
+
+ for (int i = 0; i < sh_num;) {
+ const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+ const ssize_t num_bytes_to_read =
+ (buf_bytes > num_bytes_left) ? num_bytes_left : buf_bytes;
+ const off_t offset = sh_offset + i * sizeof(buf[0]);
+ const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read, offset);
+ if (len % sizeof(buf[0]) != 0) {
+ ABSL_RAW_LOG(
+ WARNING,
+ "Reading %zd bytes from offset %ju returned %zd which is not a "
+ "multiple of %zu.",
+ num_bytes_to_read, static_cast<uintmax_t>(offset), len,
+ sizeof(buf[0]));
+ return false;
+ }
+ const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+ SAFE_ASSERT(num_headers_in_buf <= buf_entries);
+ for (int j = 0; j < num_headers_in_buf; ++j) {
+ if (buf[j].sh_type == type) {
+ *out = buf[j];
+ return true;
+ }
+ }
+ i += num_headers_in_buf;
+ }
+ return false;
+}
+
+// There is no particular reason to limit section name to 63 characters,
+// but there has (as yet) been no need for anything longer either.
+const int kMaxSectionNameLen = 64;
+
+bool ForEachSection(int fd,
+ const std::function<bool(const std::string &name,
+ const ElfW(Shdr) &)> &callback) {
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return false;
+ }
+
+ ElfW(Shdr) shstrtab;
+ off_t shstrtab_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+ if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+ return false;
+ }
+
+ for (int i = 0; i < elf_header.e_shnum; ++i) {
+ ElfW(Shdr) out;
+ off_t section_header_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * i);
+ if (!ReadFromOffsetExact(fd, &out, sizeof(out), section_header_offset)) {
+ return false;
+ }
+ off_t name_offset = shstrtab.sh_offset + out.sh_name;
+ char header_name[kMaxSectionNameLen + 1];
+ ssize_t n_read =
+ ReadFromOffset(fd, &header_name, kMaxSectionNameLen, name_offset);
+ if (n_read == -1) {
+ return false;
+ } else if (n_read > kMaxSectionNameLen) {
+ // Long read?
+ return false;
+ }
+ header_name[n_read] = '\0';
+
+ std::string name(header_name);
+ if (!callback(name, out)) {
+ break;
+ }
+ }
+ return true;
+}
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ ElfW(Shdr) * out) {
+ char header_name[kMaxSectionNameLen];
+ if (sizeof(header_name) < name_len) {
+ ABSL_RAW_LOG(WARNING,
+ "Section name '%s' is too long (%zu); "
+ "section will not be found (even if present).",
+ name, name_len);
+ // No point in even trying.
+ return false;
+ }
+
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return false;
+ }
+
+ ElfW(Shdr) shstrtab;
+ off_t shstrtab_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+ if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+ return false;
+ }
+
+ for (int i = 0; i < elf_header.e_shnum; ++i) {
+ off_t section_header_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * i);
+ if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+ return false;
+ }
+ off_t name_offset = shstrtab.sh_offset + out->sh_name;
+ ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+ if (n_read < 0) {
+ return false;
+ } else if (static_cast<size_t>(n_read) != name_len) {
+ // Short read -- name could be at end of file.
+ continue;
+ }
+ if (memcmp(header_name, name, name_len) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Compare two symbols at the same address.
+// Return true if we should pick symbol1.
+static bool ShouldPickFirstSymbol(const ElfW(Sym) & symbol1,
+ const ElfW(Sym) & symbol2) {
+ // If one of the symbols is weak and the other is not, pick the one
+  // that is not a weak symbol.
+ char bind1 = ELF_ST_BIND(symbol1.st_info);
+  char bind2 = ELF_ST_BIND(symbol2.st_info);
+ if (bind1 == STB_WEAK && bind2 != STB_WEAK) return false;
+ if (bind2 == STB_WEAK && bind1 != STB_WEAK) return true;
+
+  // If one of the symbols has zero size and the other does not, pick the
+ // one that has non-zero size.
+ if (symbol1.st_size != 0 && symbol2.st_size == 0) {
+ return true;
+ }
+ if (symbol1.st_size == 0 && symbol2.st_size != 0) {
+ return false;
+ }
+
+  // If one of the symbols has no type and the other does, pick the
+ // one that has a type.
+ char type1 = ELF_ST_TYPE(symbol1.st_info);
+  char type2 = ELF_ST_TYPE(symbol2.st_info);
+ if (type1 != STT_NOTYPE && type2 == STT_NOTYPE) {
+ return true;
+ }
+ if (type1 == STT_NOTYPE && type2 != STT_NOTYPE) {
+ return false;
+ }
+
+ // Pick the first one, if we still cannot decide.
+ return true;
+}
+
+// Return true if an address is inside a section.
+static bool InSection(const void *address, const ElfW(Shdr) * section) {
+ const char *start = reinterpret_cast<const char *>(section->sh_addr);
+ size_t size = static_cast<size_t>(section->sh_size);
+ return start <= address && address < (start + size);
+}
+
+static const char *ComputeOffset(const char *base, ptrdiff_t offset) {
+ // Note: cast to uintptr_t to avoid undefined behavior when base evaluates to
+ // zero and offset is non-zero.
+ return reinterpret_cast<const char *>(
+ reinterpret_cast<uintptr_t>(base) + offset);
+}
+
+// Read a symbol table and look for the symbol containing "pc": iterate over
+// the symbols in the table and look for the one whose address range contains
+// "pc". If such a symbol is found and its name fits in out_size,
+// the name is written into out and SYMBOL_FOUND is returned.
+// If the name does not fit, a truncated name is written into out,
+// and SYMBOL_TRUNCATED is returned. Out is NUL-terminated.
+// If the symbol is not found, SYMBOL_NOT_FOUND is returned.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
+ const void *const pc, const int fd, char *out, int out_size,
+ ptrdiff_t relocation, const ElfW(Shdr) * strtab, const ElfW(Shdr) * symtab,
+ const ElfW(Shdr) * opd, char *tmp_buf, int tmp_buf_size) {
+ if (symtab == nullptr) {
+ return SYMBOL_NOT_FOUND;
+ }
+
+ // Read multiple symbols at once to save read() calls.
+ ElfW(Sym) *buf = reinterpret_cast<ElfW(Sym) *>(tmp_buf);
+ const int buf_entries = tmp_buf_size / sizeof(buf[0]);
+
+ const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+
+ // On platforms using an .opd section (PowerPC & IA64), a function symbol
+ // has the address of a function descriptor, which contains the real
+ // starting address. However, we do not always want to use the real
+ // starting address because we sometimes want to symbolize a function
+ // pointer into the .opd section, e.g. FindSymbol(&foo,...).
+ const bool pc_in_opd =
+ kPlatformUsesOPDSections && opd != nullptr && InSection(pc, opd);
+ const bool deref_function_descriptor_pointer =
+ kPlatformUsesOPDSections && opd != nullptr && !pc_in_opd;
+
+ ElfW(Sym) best_match;
+ SafeMemZero(&best_match, sizeof(best_match));
+ bool found_match = false;
+ for (int i = 0; i < num_symbols;) {
+ off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+ const int num_remaining_symbols = num_symbols - i;
+ const int entries_in_chunk = std::min(num_remaining_symbols, buf_entries);
+ const int bytes_in_chunk = entries_in_chunk * sizeof(buf[0]);
+ const ssize_t len = ReadFromOffset(fd, buf, bytes_in_chunk, offset);
+ SAFE_ASSERT(len % sizeof(buf[0]) == 0);
+ const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+ SAFE_ASSERT(num_symbols_in_buf <= entries_in_chunk);
+ for (int j = 0; j < num_symbols_in_buf; ++j) {
+ const ElfW(Sym) &symbol = buf[j];
+
+ // For a DSO, a symbol address is relocated by the loading address.
+ // We keep the original address for opd redirection below.
+ const char *const original_start_address =
+ reinterpret_cast<const char *>(symbol.st_value);
+ const char *start_address =
+ ComputeOffset(original_start_address, relocation);
+
+ if (deref_function_descriptor_pointer &&
+ InSection(original_start_address, opd)) {
+ // The opd section is mapped into memory. Just dereference
+ // start_address to get the first double word, which points to the
+ // function entry.
+ start_address = *reinterpret_cast<const char *const *>(start_address);
+ }
+
+ // If pc is inside the .opd section, it points to a function descriptor.
+ const size_t size = pc_in_opd ? kFunctionDescriptorSize : symbol.st_size;
+ const void *const end_address = ComputeOffset(start_address, size);
+ if (symbol.st_value != 0 && // Skip null value symbols.
+ symbol.st_shndx != 0 && // Skip undefined symbols.
+#ifdef STT_TLS
+ ELF_ST_TYPE(symbol.st_info) != STT_TLS && // Skip thread-local data.
+#endif // STT_TLS
+ ((start_address <= pc && pc < end_address) ||
+ (start_address == pc && pc == end_address))) {
+ if (!found_match || ShouldPickFirstSymbol(symbol, best_match)) {
+ found_match = true;
+ best_match = symbol;
+ }
+ }
+ }
+ i += num_symbols_in_buf;
+ }
+
+ if (found_match) {
+ const size_t off = strtab->sh_offset + best_match.st_name;
+ const ssize_t n_read = ReadFromOffset(fd, out, out_size, off);
+ if (n_read <= 0) {
+ // This should never happen.
+ ABSL_RAW_LOG(WARNING,
+ "Unable to read from fd %d at offset %zu: n_read = %zd", fd,
+ off, n_read);
+ return SYMBOL_NOT_FOUND;
+ }
+ ABSL_RAW_CHECK(n_read <= out_size, "ReadFromOffset read too much data.");
+
+ // strtab->sh_offset points into .strtab-like section that contains
+ // NUL-terminated strings: '\0foo\0barbaz\0...".
+ //
+ // sh_offset+st_name points to the start of symbol name, but we don't know
+ // how long the symbol is, so we try to read as much as we have space for,
+ // and usually over-read (i.e. there is a NUL somewhere before n_read).
+ if (memchr(out, '\0', n_read) == nullptr) {
+ // Either out_size was too small (n_read == out_size and no NUL), or
+ // we tried to read past the EOF (n_read < out_size) and .strtab is
+ // corrupt (missing terminating NUL; should never happen for valid ELF).
+ out[n_read - 1] = '\0';
+ return SYMBOL_TRUNCATED;
+ }
+ return SYMBOL_FOUND;
+ }
+
+ return SYMBOL_NOT_FOUND;
+}
+
+// Get the symbol name of "pc" from the file pointed to by "fd". Process
+// both regular and dynamic symbol tables if necessary.
+// See FindSymbol() comment for description of return value.
+FindSymbolResult Symbolizer::GetSymbolFromObjectFile(
+ const ObjFile &obj, const void *const pc, const ptrdiff_t relocation,
+ char *out, int out_size, char *tmp_buf, int tmp_buf_size) {
+ ElfW(Shdr) symtab;
+ ElfW(Shdr) strtab;
+ ElfW(Shdr) opd;
+ ElfW(Shdr) *opd_ptr = nullptr;
+
+  // On platforms using an .opd section for function descriptors, read
+  // the section header. The .opd section is in the data segment and should be
+  // loaded, but we check that it is mapped just to be extra careful.
+ if (kPlatformUsesOPDSections) {
+ if (GetSectionHeaderByName(obj.fd, kOpdSectionName,
+ sizeof(kOpdSectionName) - 1, &opd) &&
+ FindObjFile(reinterpret_cast<const char *>(opd.sh_addr) + relocation,
+ opd.sh_size) != nullptr) {
+ opd_ptr = &opd;
+ } else {
+ return SYMBOL_NOT_FOUND;
+ }
+ }
+
+ // Consult a regular symbol table, then fall back to the dynamic symbol table.
+ for (const auto symbol_table_type : {SHT_SYMTAB, SHT_DYNSYM}) {
+ if (!GetSectionHeaderByType(obj.fd, obj.elf_header.e_shnum,
+ obj.elf_header.e_shoff, symbol_table_type,
+ &symtab, tmp_buf, tmp_buf_size)) {
+ continue;
+ }
+ if (!ReadFromOffsetExact(
+ obj.fd, &strtab, sizeof(strtab),
+ obj.elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
+ continue;
+ }
+ const FindSymbolResult rc =
+ FindSymbol(pc, obj.fd, out, out_size, relocation, &strtab, &symtab,
+ opd_ptr, tmp_buf, tmp_buf_size);
+ if (rc != SYMBOL_NOT_FOUND) {
+ return rc;
+ }
+ }
+
+ return SYMBOL_NOT_FOUND;
+}
+
+namespace {
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+class FileDescriptor {
+ public:
+ explicit FileDescriptor(int fd) : fd_(fd) {}
+ FileDescriptor(const FileDescriptor &) = delete;
+ FileDescriptor &operator=(const FileDescriptor &) = delete;
+
+ ~FileDescriptor() {
+ if (fd_ >= 0) {
+ NO_INTR(close(fd_));
+ }
+ }
+
+ int get() const { return fd_; }
+
+ private:
+ const int fd_;
+};
+
+// Helper class for reading lines from file.
+//
+// Note: we don't use ProcMapsIterator since the object is big (it has
+// a 5k array member) and uses async-unsafe functions such as sscanf()
+// and snprintf().
+class LineReader {
+ public:
+ explicit LineReader(int fd, char *buf, int buf_len)
+ : fd_(fd),
+ buf_len_(buf_len),
+ buf_(buf),
+ bol_(buf),
+ eol_(buf),
+ eod_(buf) {}
+
+ LineReader(const LineReader &) = delete;
+ LineReader &operator=(const LineReader &) = delete;
+
+ // Read '\n'-terminated line from file. On success, modify "bol"
+ // and "eol", then return true. Otherwise, return false.
+ //
+ // Note: if the last line doesn't end with '\n', the line will be
+ // dropped. It's an intentional behavior to make the code simple.
+ bool ReadLine(const char **bol, const char **eol) {
+ if (BufferIsEmpty()) { // First time.
+ const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+ if (num_bytes <= 0) { // EOF or error.
+ return false;
+ }
+ eod_ = buf_ + num_bytes;
+ bol_ = buf_;
+ } else {
+ bol_ = eol_ + 1; // Advance to the next line in the buffer.
+ SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_".
+ if (!HasCompleteLine()) {
+ const int incomplete_line_length = eod_ - bol_;
+ // Move the trailing incomplete line to the beginning.
+ memmove(buf_, bol_, incomplete_line_length);
+ // Read text from file and append it.
+ char *const append_pos = buf_ + incomplete_line_length;
+ const int capacity_left = buf_len_ - incomplete_line_length;
+ const ssize_t num_bytes =
+ ReadPersistent(fd_, append_pos, capacity_left);
+ if (num_bytes <= 0) { // EOF or error.
+ return false;
+ }
+ eod_ = append_pos + num_bytes;
+ bol_ = buf_;
+ }
+ }
+ eol_ = FindLineFeed();
+ if (eol_ == nullptr) { // '\n' not found. Malformed line.
+ return false;
+ }
+ *eol_ = '\0'; // Replace '\n' with '\0'.
+
+ *bol = bol_;
+ *eol = eol_;
+ return true;
+ }
+
+ private:
+ char *FindLineFeed() const {
+ return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+ }
+
+ bool BufferIsEmpty() const { return buf_ == eod_; }
+
+ bool HasCompleteLine() const {
+ return !BufferIsEmpty() && FindLineFeed() != nullptr;
+ }
+
+ const int fd_;
+ const int buf_len_;
+ char *const buf_;
+ char *bol_;
+ char *eol_;
+ const char *eod_; // End of data in "buf_".
+};
+} // namespace
+
+// Place the hex number read from "start" into "*value". The pointer to
+// the first non-hex character or "end" is returned.
+static const char *GetHex(const char *start, const char *end,
+ uint64_t *const value) {
+ uint64_t hex = 0;
+ const char *p;
+ for (p = start; p < end; ++p) {
+ int ch = *p;
+ if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') ||
+ (ch >= 'a' && ch <= 'f')) {
+ hex = (hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+ } else { // Encountered the first non-hex character.
+ break;
+ }
+ }
+ SAFE_ASSERT(p <= end);
+ *value = hex;
+ return p;
+}
+
+static const char *GetHex(const char *start, const char *end,
+ const void **const addr) {
+ uint64_t hex = 0;
+ const char *p = GetHex(start, end, &hex);
+ *addr = reinterpret_cast<void *>(hex);
+ return p;
+}
+
+// Normally we are only interested in "r?x" maps.
+// On the PowerPC, function pointers point to descriptors in the .opd
+// section. The descriptors themselves are not executable code, so
+// we need to relax the check below to "r??".
+static bool ShouldUseMapping(const char *const flags) {
+ return flags[0] == 'r' && (kPlatformUsesOPDSections || flags[2] == 'x');
+}
+
+// Read /proc/self/maps and run "callback" for each mmapped file found. If
+// "callback" returns false, stop scanning and return true. Else continue
+// scanning /proc/self/maps. Return true if no parse error is found.
+static ABSL_ATTRIBUTE_NOINLINE bool ReadAddrMap(
+ bool (*callback)(const char *filename, const void *const start_addr,
+ const void *const end_addr, uint64_t offset, void *arg),
+ void *arg, void *tmp_buf, int tmp_buf_size) {
+ // Use /proc/self/task/<pid>/maps instead of /proc/self/maps. The latter
+  // requires the kernel to stop all threads, and is significantly slower when
+  // there are 1000s of threads.
+ // are 1000s of threads.
+ char maps_path[80];
+ snprintf(maps_path, sizeof(maps_path), "/proc/self/task/%d/maps", getpid());
+
+ int maps_fd;
+ NO_INTR(maps_fd = open(maps_path, O_RDONLY));
+ FileDescriptor wrapped_maps_fd(maps_fd);
+ if (wrapped_maps_fd.get() < 0) {
+ ABSL_RAW_LOG(WARNING, "%s: errno=%d", maps_path, errno);
+ return false;
+ }
+
+ // Iterate over maps and look for the map containing the pc. Then
+ // look into the symbol tables inside.
+ LineReader reader(wrapped_maps_fd.get(), static_cast<char *>(tmp_buf),
+ tmp_buf_size);
+ while (true) {
+ const char *cursor;
+ const char *eol;
+ if (!reader.ReadLine(&cursor, &eol)) { // EOF or malformed line.
+ break;
+ }
+
+ const char *line = cursor;
+ const void *start_address;
+ // Start parsing line in /proc/self/maps. Here is an example:
+ //
+ // 08048000-0804c000 r-xp 00000000 08:01 2142121 /bin/cat
+ //
+ // We want start address (08048000), end address (0804c000), flags
+ // (r-xp) and file name (/bin/cat).
+
+ // Read start address.
+ cursor = GetHex(cursor, eol, &start_address);
+ if (cursor == eol || *cursor != '-') {
+ ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line);
+ return false;
+ }
+ ++cursor; // Skip '-'.
+
+ // Read end address.
+ const void *end_address;
+ cursor = GetHex(cursor, eol, &end_address);
+ if (cursor == eol || *cursor != ' ') {
+ ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line);
+ return false;
+ }
+ ++cursor; // Skip ' '.
+
+ // Read flags. Skip flags until we encounter a space or eol.
+ const char *const flags_start = cursor;
+ while (cursor < eol && *cursor != ' ') {
+ ++cursor;
+ }
+ // We expect at least four letters for flags (ex. "r-xp").
+ if (cursor == eol || cursor < flags_start + 4) {
+ ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps: %s", line);
+ return false;
+ }
+
+ // Check flags.
+ if (!ShouldUseMapping(flags_start)) {
+ continue; // We skip this map.
+ }
+ ++cursor; // Skip ' '.
+
+ // Read file offset.
+ uint64_t offset;
+ cursor = GetHex(cursor, eol, &offset);
+ ++cursor; // Skip ' '.
+
+ // Skip to file name. "cursor" now points to dev. We need to skip at least
+ // two spaces for dev and inode.
+ int num_spaces = 0;
+ while (cursor < eol) {
+ if (*cursor == ' ') {
+ ++num_spaces;
+ } else if (num_spaces >= 2) {
+ // The first non-space character after skipping two spaces
+ // is the beginning of the file name.
+ break;
+ }
+ ++cursor;
+ }
+
+ // Check whether this entry corresponds to our hint table for the true
+ // filename.
+ bool hinted =
+ GetFileMappingHint(&start_address, &end_address, &offset, &cursor);
+ if (!hinted && (cursor == eol || cursor[0] == '[')) {
+ // not an object file, typically [vdso] or [vsyscall]
+ continue;
+ }
+ if (!callback(cursor, start_address, end_address, offset, arg)) break;
+ }
+ return true;
+}
+
+// Find the objfile mapped in address region containing [addr, addr + len).
+ObjFile *Symbolizer::FindObjFile(const void *const addr, size_t len) {
+ for (int i = 0; i < 2; ++i) {
+ if (!ok_) return nullptr;
+
+ // Read /proc/self/maps if necessary
+ if (!addr_map_read_) {
+ addr_map_read_ = true;
+ if (!ReadAddrMap(RegisterObjFile, this, tmp_buf_, TMP_BUF_SIZE)) {
+ ok_ = false;
+ return nullptr;
+ }
+ }
+
+ int lo = 0;
+ int hi = addr_map_.Size();
+ while (lo < hi) {
+ int mid = (lo + hi) / 2;
+ if (addr < addr_map_.At(mid)->end_addr) {
+ hi = mid;
+ } else {
+ lo = mid + 1;
+ }
+ }
+ if (lo != addr_map_.Size()) {
+ ObjFile *obj = addr_map_.At(lo);
+ SAFE_ASSERT(obj->end_addr > addr);
+ if (addr >= obj->start_addr &&
+ reinterpret_cast<const char *>(addr) + len <= obj->end_addr)
+ return obj;
+ }
+
+ // The address mapping may have changed since it was last read. Retry.
+ ClearAddrMap();
+ }
+ return nullptr;
+}
+
+void Symbolizer::ClearAddrMap() {
+ for (int i = 0; i != addr_map_.Size(); i++) {
+ ObjFile *o = addr_map_.At(i);
+ base_internal::LowLevelAlloc::Free(o->filename);
+ if (o->fd >= 0) {
+ NO_INTR(close(o->fd));
+ }
+ }
+ addr_map_.Clear();
+ addr_map_read_ = false;
+}
+
+// Callback for ReadAddrMap to register objfiles in an in-memory table.
+bool Symbolizer::RegisterObjFile(const char *filename,
+ const void *const start_addr,
+ const void *const end_addr, uint64_t offset,
+ void *arg) {
+ Symbolizer *impl = static_cast<Symbolizer *>(arg);
+
+  // Files are supposed to be added in increasing address order. Make
+ // sure that's the case.
+ int addr_map_size = impl->addr_map_.Size();
+ if (addr_map_size != 0) {
+ ObjFile *old = impl->addr_map_.At(addr_map_size - 1);
+ if (old->end_addr > end_addr) {
+ ABSL_RAW_LOG(ERROR,
+ "Unsorted addr map entry: 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR
+ ": %s",
+ reinterpret_cast<uintptr_t>(end_addr), filename,
+ reinterpret_cast<uintptr_t>(old->end_addr), old->filename);
+ return true;
+ } else if (old->end_addr == end_addr) {
+ // The same entry appears twice. This sometimes happens for [vdso].
+ if (old->start_addr != start_addr ||
+ strcmp(old->filename, filename) != 0) {
+ ABSL_RAW_LOG(ERROR,
+ "Duplicate addr 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR ": %s",
+ reinterpret_cast<uintptr_t>(end_addr), filename,
+ reinterpret_cast<uintptr_t>(old->end_addr), old->filename);
+ }
+ return true;
+ }
+ }
+ ObjFile *obj = impl->addr_map_.Add();
+ obj->filename = impl->CopyString(filename);
+ obj->start_addr = start_addr;
+ obj->end_addr = end_addr;
+ obj->offset = offset;
+ obj->elf_type = -1; // filled on demand
+ obj->fd = -1; // opened on demand
+ return true;
+}
+
+// This function wraps the Demangle function to provide an interface
+// where the input symbol is demangled in-place.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size,
+ char *tmp_buf,
+ int tmp_buf_size) {
+ if (Demangle(out, tmp_buf, tmp_buf_size)) {
+ // Demangling succeeded. Copy to out if the space allows.
+ int len = strlen(tmp_buf);
+ if (len + 1 <= out_size) { // +1 for '\0'.
+ SAFE_ASSERT(len < tmp_buf_size);
+ memmove(out, tmp_buf, len + 1);
+ }
+ }
+}
+
+SymbolCacheLine *Symbolizer::GetCacheLine(const void *const pc) {
+ uintptr_t pc0 = reinterpret_cast<uintptr_t>(pc);
+ pc0 >>= 3; // drop the low 3 bits
+
+ // Shuffle bits.
+ pc0 ^= (pc0 >> 6) ^ (pc0 >> 12) ^ (pc0 >> 18);
+ return &symbol_cache_[pc0 % SYMBOL_CACHE_LINES];
+}
+
+void Symbolizer::AgeSymbols(SymbolCacheLine *line) {
+ for (uint32_t &age : line->age) {
+ ++age;
+ }
+}
+
+const char *Symbolizer::FindSymbolInCache(const void *const pc) {
+ if (pc == nullptr) return nullptr;
+
+ SymbolCacheLine *line = GetCacheLine(pc);
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) {
+ if (line->pc[i] == pc) {
+ AgeSymbols(line);
+ line->age[i] = 0;
+ return line->name[i];
+ }
+ }
+ return nullptr;
+}
+
+const char *Symbolizer::InsertSymbolInCache(const void *const pc,
+ const char *name) {
+ SAFE_ASSERT(pc != nullptr);
+
+ SymbolCacheLine *line = GetCacheLine(pc);
+ uint32_t max_age = 0;
+ int oldest_index = -1;
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) {
+ if (line->pc[i] == nullptr) {
+ AgeSymbols(line);
+ line->pc[i] = pc;
+ line->name[i] = CopyString(name);
+ line->age[i] = 0;
+ return line->name[i];
+ }
+ if (line->age[i] >= max_age) {
+ max_age = line->age[i];
+ oldest_index = i;
+ }
+ }
+
+ AgeSymbols(line);
+ ABSL_RAW_CHECK(oldest_index >= 0, "Corrupt cache");
+ base_internal::LowLevelAlloc::Free(line->name[oldest_index]);
+ line->pc[oldest_index] = pc;
+ line->name[oldest_index] = CopyString(name);
+ line->age[oldest_index] = 0;
+ return line->name[oldest_index];
+}
+
+static void MaybeOpenFdFromSelfExe(ObjFile *obj) {
+ if (memcmp(obj->start_addr, ELFMAG, SELFMAG) != 0) {
+ return;
+ }
+ int fd = open("/proc/self/exe", O_RDONLY);
+ if (fd == -1) {
+ return;
+ }
+  // Verify that the contents of /proc/self/exe match the in-memory image of
+ // the binary. This can fail if the "deleted" binary is in fact not
+ // the main executable, or for binaries that have the first PT_LOAD
+ // segment smaller than 4K. We do it in four steps so that the
+ // buffer is smaller and we don't consume too much stack space.
+ const char *mem = reinterpret_cast<const char *>(obj->start_addr);
+ for (int i = 0; i < 4; ++i) {
+ char buf[1024];
+ ssize_t n = read(fd, buf, sizeof(buf));
+ if (n != sizeof(buf) || memcmp(buf, mem, sizeof(buf)) != 0) {
+ close(fd);
+ return;
+ }
+ mem += sizeof(buf);
+ }
+ obj->fd = fd;
+}
+
+static bool MaybeInitializeObjFile(ObjFile *obj) {
+ if (obj->fd < 0) {
+ obj->fd = open(obj->filename, O_RDONLY);
+
+ if (obj->fd < 0) {
+ // Getting /proc/self/exe here means that we were hinted.
+ if (strcmp(obj->filename, "/proc/self/exe") == 0) {
+ // /proc/self/exe may be inaccessible (due to setuid, etc.), so try
+ // accessing the binary via argv0.
+ if (argv0_value != nullptr) {
+ obj->fd = open(argv0_value, O_RDONLY);
+ }
+ } else {
+ MaybeOpenFdFromSelfExe(obj);
+ }
+ }
+
+ if (obj->fd < 0) {
+ ABSL_RAW_LOG(WARNING, "%s: open failed: errno=%d", obj->filename, errno);
+ return false;
+ }
+ obj->elf_type = FileGetElfType(obj->fd);
+ if (obj->elf_type < 0) {
+ ABSL_RAW_LOG(WARNING, "%s: wrong elf type: %d", obj->filename,
+ obj->elf_type);
+ return false;
+ }
+
+ if (!ReadFromOffsetExact(obj->fd, &obj->elf_header, sizeof(obj->elf_header),
+ 0)) {
+ ABSL_RAW_LOG(WARNING, "%s: failed to read elf header", obj->filename);
+ return false;
+ }
+ }
+ return true;
+}
+
+// The implementation of our symbolization routine. If it
+// successfully finds the symbol containing "pc" and obtains the
+// symbol name, returns a pointer to that symbol. Otherwise, returns nullptr.
+// If any symbol decorators have been installed via InstallSymbolDecorator(),
+// they are called here as well.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+const char *Symbolizer::GetSymbol(const void *const pc) {
+ const char *entry = FindSymbolInCache(pc);
+ if (entry != nullptr) {
+ return entry;
+ }
+ symbol_buf_[0] = '\0';
+
+ ObjFile *const obj = FindObjFile(pc, 1);
+ ptrdiff_t relocation = 0;
+ int fd = -1;
+ if (obj != nullptr) {
+ if (MaybeInitializeObjFile(obj)) {
+ if (obj->elf_type == ET_DYN &&
+ reinterpret_cast<uint64_t>(obj->start_addr) >= obj->offset) {
+ // This object was relocated.
+ //
+ // For obj->offset > 0, adjust the relocation since a mapping at offset
+ // X in the file will have a start address of [true relocation]+X.
+ relocation = reinterpret_cast<ptrdiff_t>(obj->start_addr) - obj->offset;
+ }
+
+ fd = obj->fd;
+ }
+ if (GetSymbolFromObjectFile(*obj, pc, relocation, symbol_buf_,
+ sizeof(symbol_buf_), tmp_buf_,
+ sizeof(tmp_buf_)) == SYMBOL_FOUND) {
+ // Only try to demangle the symbol name if it fit into symbol_buf_.
+ DemangleInplace(symbol_buf_, sizeof(symbol_buf_), tmp_buf_,
+ sizeof(tmp_buf_));
+ }
+ } else {
+#if ABSL_HAVE_VDSO_SUPPORT
+ VDSOSupport vdso;
+ if (vdso.IsPresent()) {
+ VDSOSupport::SymbolInfo symbol_info;
+ if (vdso.LookupSymbolByAddress(pc, &symbol_info)) {
+ // All VDSO symbols are known to be short.
+ size_t len = strlen(symbol_info.name);
+ ABSL_RAW_CHECK(len + 1 < sizeof(symbol_buf_),
+ "VDSO symbol unexpectedly long");
+ memcpy(symbol_buf_, symbol_info.name, len + 1);
+ }
+ }
+#endif
+ }
+
+ if (g_decorators_mu.TryLock()) {
+ if (g_num_decorators > 0) {
+ SymbolDecoratorArgs decorator_args = {
+ pc, relocation, fd, symbol_buf_, sizeof(symbol_buf_),
+ tmp_buf_, sizeof(tmp_buf_), nullptr};
+ for (int i = 0; i < g_num_decorators; ++i) {
+ decorator_args.arg = g_decorators[i].arg;
+ g_decorators[i].fn(&decorator_args);
+ }
+ }
+ g_decorators_mu.Unlock();
+ }
+ if (symbol_buf_[0] == '\0') {
+ return nullptr;
+ }
+ symbol_buf_[sizeof(symbol_buf_) - 1] = '\0'; // Paranoia.
+ return InsertSymbolInCache(pc, symbol_buf_);
+}
+
+bool RemoveAllSymbolDecorators(void) {
+ if (!g_decorators_mu.TryLock()) {
+ // Someone else is using decorators. Get out.
+ return false;
+ }
+ g_num_decorators = 0;
+ g_decorators_mu.Unlock();
+ return true;
+}
+
+bool RemoveSymbolDecorator(int ticket) {
+ if (!g_decorators_mu.TryLock()) {
+ // Someone else is using decorators. Get out.
+ return false;
+ }
+ for (int i = 0; i < g_num_decorators; ++i) {
+ if (g_decorators[i].ticket == ticket) {
+ while (i < g_num_decorators - 1) {
+ g_decorators[i] = g_decorators[i + 1];
+ ++i;
+ }
+ g_num_decorators = i;
+ break;
+ }
+ }
+ g_decorators_mu.Unlock();
+ return true; // Decorator is known to be removed.
+}
+
+int InstallSymbolDecorator(SymbolDecorator decorator, void *arg) {
+ static int ticket = 0;
+
+ if (!g_decorators_mu.TryLock()) {
+    // Someone else is using decorators. Get out. Return a negative value:
+    // this function returns an int ticket, and 0 is a valid ticket.
+    return -2;
+ }
+ int ret = ticket;
+ if (g_num_decorators >= kMaxDecorators) {
+ ret = -1;
+ } else {
+ g_decorators[g_num_decorators] = {decorator, arg, ticket++};
+ ++g_num_decorators;
+ }
+ g_decorators_mu.Unlock();
+ return ret;
+}
+
+bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset,
+ const char *filename) {
+ SAFE_ASSERT(start <= end);
+ SAFE_ASSERT(filename != nullptr);
+
+ InitSigSafeArena();
+
+ if (!g_file_mapping_mu.TryLock()) {
+ return false;
+ }
+
+ bool ret = true;
+ if (g_num_file_mapping_hints >= kMaxFileMappingHints) {
+ ret = false;
+ } else {
+ // TODO(ckennelly): Move this into a std::string copy routine.
+ int len = strlen(filename);
+ char *dst = static_cast<char *>(
+ base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena()));
+ ABSL_RAW_CHECK(dst != nullptr, "out of memory");
+ memcpy(dst, filename, len + 1);
+
+ auto &hint = g_file_mapping_hints[g_num_file_mapping_hints++];
+ hint.start = start;
+ hint.end = end;
+ hint.offset = offset;
+ hint.filename = dst;
+ }
+
+ g_file_mapping_mu.Unlock();
+ return ret;
+}
+
+bool GetFileMappingHint(const void **start, const void **end, uint64_t *offset,
+ const char **filename) {
+ if (!g_file_mapping_mu.TryLock()) {
+ return false;
+ }
+ bool found = false;
+ for (int i = 0; i < g_num_file_mapping_hints; i++) {
+ if (g_file_mapping_hints[i].start <= *start &&
+ *end <= g_file_mapping_hints[i].end) {
+ // We assume that the start_address for the mapping is the base
+ // address of the ELF section, but when [start_address,end_address) is
+ // not strictly equal to [hint.start, hint.end), that assumption is
+ // invalid.
+ //
+ // This uses the hint's start address (even though hint.start is not
+ // necessarily equal to start_address) to ensure the correct
+ // relocation is computed later.
+ *start = g_file_mapping_hints[i].start;
+ *end = g_file_mapping_hints[i].end;
+ *offset = g_file_mapping_hints[i].offset;
+ *filename = g_file_mapping_hints[i].filename;
+ found = true;
+ break;
+ }
+ }
+ g_file_mapping_mu.Unlock();
+ return found;
+}
+
+} // namespace debugging_internal
+
+bool Symbolize(const void *pc, char *out, int out_size) {
+ // Symbolization is very slow under tsan.
+ ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ SAFE_ASSERT(out_size >= 0);
+ debugging_internal::Symbolizer *s = debugging_internal::AllocateSymbolizer();
+ const char *name = s->GetSymbol(pc);
+ bool ok = false;
+ if (name != nullptr && out_size > 0) {
+ strncpy(out, name, out_size);
+ ok = true;
+ if (out[out_size - 1] != '\0') {
+ // strncpy() does not '\0' terminate when it truncates. Do so, with
+ // trailing ellipsis.
+ static constexpr char kEllipsis[] = "...";
+ int ellipsis_size =
+ std::min(implicit_cast<int>(strlen(kEllipsis)), out_size - 1);
+ memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+ out[out_size - 1] = '\0';
+ }
+ }
+ debugging_internal::FreeSymbolizer(s);
+ ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ return ok;
+}
+
+} // namespace absl
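For orientation, the public entry points defined above, absl::InitializeSymbolizer() and
absl::Symbolize(), are exercised end-to-end by the test added next. A minimal usage sketch
follows; the function name MyFunction, the buffer size, and the printed output are
illustrative only and are not part of this patch:

  #include <cstdio>

  #include "absl/debugging/symbolize.h"

  void MyFunction() {}

  int main(int argc, char **argv) {
    // Initialize once, early, passing argv[0]; the ELF symbolizer falls back
    // to it when /proc/self/exe cannot be opened (see MaybeInitializeObjFile
    // above).
    absl::InitializeSymbolizer(argv[0]);

    char name[1024];
    if (absl::Symbolize((void *)(&MyFunction), name, sizeof(name))) {
      std::printf("%s\n", name);  // Typically prints "MyFunction()".
    }
    return 0;
  }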
diff --git a/absl/debugging/symbolize_test.cc b/absl/debugging/symbolize_test.cc
new file mode 100644
index 0000000..08068c3
--- /dev/null
+++ b/absl/debugging/symbolize_test.cc
@@ -0,0 +1,522 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/debugging/symbolize.h"
+
+#ifndef _WIN32
+#include <fcntl.h>
+#include <sys/mman.h>
+#endif
+
+#include <cstring>
+#include <iostream>
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/casts.h"
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/optimization.h"
+#include "absl/debugging/internal/stack_consumption.h"
+#include "absl/memory/memory.h"
+
+using testing::Contains;
+
+// Functions to symbolize. Use C linkage to avoid mangled names.
+extern "C" {
+void nonstatic_func() { ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); }
+static void static_func() { ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); }
+} // extern "C"
+
+struct Foo {
+ static void func(int x);
+};
+
+// A C++ method that should have a mangled name.
+void ABSL_ATTRIBUTE_NOINLINE Foo::func(int) {
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+
+// Create functions that will remain in different text sections in the
+// final binary when linker option "-z,keep-text-section-prefix" is used.
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.unlikely) unlikely_func() {
+ return 0;
+}
+
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.hot) hot_func() {
+ return 0;
+}
+
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.startup) startup_func() {
+ return 0;
+}
+
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.exit) exit_func() {
+ return 0;
+}
+
+int /*ABSL_ATTRIBUTE_SECTION_VARIABLE(.text)*/ regular_func() {
+ return 0;
+}
+
+// Thread-local data may confuse the symbolizer; ensure that it does not.
+// Variable sizes and order are important.
+#if ABSL_PER_THREAD_TLS
+static ABSL_PER_THREAD_TLS_KEYWORD char symbolize_test_thread_small[1];
+static ABSL_PER_THREAD_TLS_KEYWORD char
+ symbolize_test_thread_big[2 * 1024 * 1024];
+#endif
+
+// Used below to hopefully inhibit some compiler/linker optimizations
+// that may remove kHpageTextPadding, kPadding0, and kPadding1 from
+// the binary.
+static volatile bool volatile_bool = false;
+
+// Force the binary to be large enough that a THP .text remap will succeed.
+static constexpr size_t kHpageSize = 1 << 21;
+const char kHpageTextPadding[kHpageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(
+ .text) = "";
+
+static char try_symbolize_buffer[4096];
+
+// A wrapper function for absl::Symbolize() to make the unit test simple. The
+// limit must be <= sizeof(try_symbolize_buffer). Returns nullptr if
+// absl::Symbolize() returns false; otherwise returns try_symbolize_buffer with
+// the result of absl::Symbolize().
+static const char *TrySymbolizeWithLimit(void *pc, int limit) {
+ ABSL_RAW_CHECK(limit <= sizeof(try_symbolize_buffer),
+ "try_symbolize_buffer is too small");
+
+ // Use the heap to facilitate heap and buffer sanitizer tools.
+ auto heap_buffer = absl::make_unique<char[]>(sizeof(try_symbolize_buffer));
+ bool found = absl::Symbolize(pc, heap_buffer.get(), limit);
+ if (found) {
+ ABSL_RAW_CHECK(strnlen(heap_buffer.get(), limit) < limit,
+ "absl::Symbolize() did not properly terminate the string");
+ strncpy(try_symbolize_buffer, heap_buffer.get(),
+ sizeof(try_symbolize_buffer) - 1);
+ try_symbolize_buffer[sizeof(try_symbolize_buffer) - 1] = '\0';
+ }
+
+ return found ? try_symbolize_buffer : nullptr;
+}
+
+// A wrapper for TrySymbolizeWithLimit(), with a large limit.
+static const char *TrySymbolize(void *pc) {
+ return TrySymbolizeWithLimit(pc, sizeof(try_symbolize_buffer));
+}
+
+#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
+
+TEST(Symbolize, Cached) {
+ // Compilers should give us pointers to them.
+ EXPECT_STREQ("nonstatic_func", TrySymbolize((void *)(&nonstatic_func)));
+
+ // The name of an internal linkage symbol is not specified; allow either a
+ // mangled or an unmangled name here.
+ const char *static_func_symbol = TrySymbolize((void *)(&static_func));
+ EXPECT_TRUE(strcmp("static_func", static_func_symbol) == 0 ||
+ strcmp("static_func()", static_func_symbol) == 0);
+
+ EXPECT_TRUE(nullptr == TrySymbolize(nullptr));
+}
+
+TEST(Symbolize, Truncation) {
+ constexpr char kNonStaticFunc[] = "nonstatic_func";
+ EXPECT_STREQ("nonstatic_func",
+ TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ strlen(kNonStaticFunc) + 1));
+ EXPECT_STREQ("nonstatic_...",
+ TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ strlen(kNonStaticFunc) + 0));
+ EXPECT_STREQ("nonstatic...",
+ TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ strlen(kNonStaticFunc) - 1));
+ EXPECT_STREQ("n...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 5));
+ EXPECT_STREQ("...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 4));
+ EXPECT_STREQ("..", TrySymbolizeWithLimit((void *)(&nonstatic_func), 3));
+ EXPECT_STREQ(".", TrySymbolizeWithLimit((void *)(&nonstatic_func), 2));
+ EXPECT_STREQ("", TrySymbolizeWithLimit((void *)(&nonstatic_func), 1));
+ EXPECT_EQ(nullptr, TrySymbolizeWithLimit((void *)(&nonstatic_func), 0));
+}
+
+TEST(Symbolize, SymbolizeWithDemangling) {
+ Foo::func(100);
+ EXPECT_STREQ("Foo::func()", TrySymbolize((void *)(&Foo::func)));
+}
+
+TEST(Symbolize, SymbolizeSplitTextSections) {
+ EXPECT_STREQ("unlikely_func()", TrySymbolize((void *)(&unlikely_func)));
+ EXPECT_STREQ("hot_func()", TrySymbolize((void *)(&hot_func)));
+ EXPECT_STREQ("startup_func()", TrySymbolize((void *)(&startup_func)));
+ EXPECT_STREQ("exit_func()", TrySymbolize((void *)(&exit_func)));
+ EXPECT_STREQ("regular_func()", TrySymbolize((void *)(®ular_func)));
+}
+
+// Tests that verify that Symbolize's stack footprint is within some limit.
+#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+
+static void *g_pc_to_symbolize;
+static char g_symbolize_buffer[4096];
+static char *g_symbolize_result;
+
+static void SymbolizeSignalHandler(int signo) {
+ if (absl::Symbolize(g_pc_to_symbolize, g_symbolize_buffer,
+ sizeof(g_symbolize_buffer))) {
+ g_symbolize_result = g_symbolize_buffer;
+ } else {
+ g_symbolize_result = nullptr;
+ }
+}
+
+// Call Symbolize and figure out the stack footprint of this call.
+static const char *SymbolizeStackConsumption(void *pc, int *stack_consumed) {
+ g_pc_to_symbolize = pc;
+ *stack_consumed = absl::debugging_internal::GetSignalHandlerStackConsumption(
+ SymbolizeSignalHandler);
+ return g_symbolize_result;
+}
+
+static int GetStackConsumptionUpperLimit() {
+ // Symbolize stack consumption should be within 2kB.
+ int stack_consumption_upper_limit = 2048;
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+ // Account for sanitizer instrumentation requiring additional stack space.
+ stack_consumption_upper_limit *= 5;
+#endif
+ return stack_consumption_upper_limit;
+}
+
+TEST(Symbolize, SymbolizeStackConsumption) {
+ int stack_consumed = 0;
+
+ const char *symbol =
+ SymbolizeStackConsumption((void *)(&nonstatic_func), &stack_consumed);
+ EXPECT_STREQ("nonstatic_func", symbol);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, GetStackConsumptionUpperLimit());
+
+ // The name of an internal linkage symbol is not specified; allow either a
+ // mangled or an unmangled name here.
+ symbol = SymbolizeStackConsumption((void *)(&static_func), &stack_consumed);
+ EXPECT_TRUE(strcmp("static_func", symbol) == 0 ||
+ strcmp("static_func()", symbol) == 0);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, GetStackConsumptionUpperLimit());
+}
+
+TEST(Symbolize, SymbolizeWithDemanglingStackConsumption) {
+ Foo::func(100);
+ int stack_consumed = 0;
+
+ const char *symbol =
+ SymbolizeStackConsumption((void *)(&Foo::func), &stack_consumed);
+
+ EXPECT_STREQ("Foo::func()", symbol);
+ EXPECT_GT(stack_consumed, 0);
+ EXPECT_LT(stack_consumed, GetStackConsumptionUpperLimit());
+}
+
+#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+
+// Use a 64K page size for PPC.
+const size_t kPageSize = 64 << 10;
+// We place read-only symbols into the .text section and verify that we can
+// symbolize them and other symbols after remapping them.
+const char kPadding0[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) =
+ "";
+const char kPadding1[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) =
+ "";
+
+static int FilterElfHeader(struct dl_phdr_info *info, size_t size, void *data) {
+ for (int i = 0; i < info->dlpi_phnum; i++) {
+ if (info->dlpi_phdr[i].p_type == PT_LOAD &&
+ info->dlpi_phdr[i].p_flags == (PF_R | PF_X)) {
+ const void *const vaddr =
+ absl::bit_cast<void *>(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
+ const auto segsize = info->dlpi_phdr[i].p_memsz;
+
+ const char *self_exe;
+ if (info->dlpi_name != nullptr && info->dlpi_name[0] != '\0') {
+ self_exe = info->dlpi_name;
+ } else {
+ self_exe = "/proc/self/exe";
+ }
+
+ absl::debugging_internal::RegisterFileMappingHint(
+ vaddr, reinterpret_cast<const char *>(vaddr) + segsize,
+ info->dlpi_phdr[i].p_offset, self_exe);
+
+ return 1;
+ }
+ }
+
+ return 1;
+}
+
+TEST(Symbolize, SymbolizeWithMultipleMaps) {
+ // Force kPadding0 and kPadding1 to be linked in.
+ if (volatile_bool) {
+ ABSL_RAW_LOG(INFO, "%s", kPadding0);
+ ABSL_RAW_LOG(INFO, "%s", kPadding1);
+ }
+
+ // Verify we can symbolize everything.
+ char buf[512];
+ memset(buf, 0, sizeof(buf));
+ absl::Symbolize(kPadding0, buf, sizeof(buf));
+ EXPECT_STREQ("kPadding0", buf);
+
+ memset(buf, 0, sizeof(buf));
+ absl::Symbolize(kPadding1, buf, sizeof(buf));
+ EXPECT_STREQ("kPadding1", buf);
+
+ // Specify a hint for the executable segment.
+ dl_iterate_phdr(FilterElfHeader, nullptr);
+
+ // Reload at least one page out of kPadding0, kPadding1
+ const char *ptrs[] = {kPadding0, kPadding1};
+
+ for (const char *ptr : ptrs) {
+ const int kMapFlags = MAP_ANONYMOUS | MAP_PRIVATE;
+ void *addr = mmap(nullptr, kPageSize, PROT_READ, kMapFlags, 0, 0);
+ ASSERT_NE(addr, MAP_FAILED);
+
+ // kPadding[0-1] is full of zeroes, so we can remap anywhere within it, but
+ // we ensure there is at least a full page of padding.
+ void *remapped = reinterpret_cast<void *>(
+ reinterpret_cast<uintptr_t>(ptr + kPageSize) & ~(kPageSize - 1ULL));
+
+ const int kMremapFlags = (MREMAP_MAYMOVE | MREMAP_FIXED);
+ void *ret = mremap(addr, kPageSize, kPageSize, kMremapFlags, remapped);
+ ASSERT_NE(ret, MAP_FAILED);
+ }
+
+ // Invalidate the symbolization cache so we are forced to rely on the hint.
+ absl::Symbolize(nullptr, buf, sizeof(buf));
+
+ // Verify we can still symbolize.
+ const char *expected[] = {"kPadding0", "kPadding1"};
+ const size_t offsets[] = {0, kPageSize, 2 * kPageSize, 3 * kPageSize};
+
+ for (int i = 0; i < 2; i++) {
+ for (size_t offset : offsets) {
+ memset(buf, 0, sizeof(buf));
+ absl::Symbolize(ptrs[i] + offset, buf, sizeof(buf));
+ EXPECT_STREQ(expected[i], buf);
+ }
+ }
+}
+
+// Appends string(*args->arg) to args->symbol_buf.
+static void DummySymbolDecorator(
+ const absl::debugging_internal::SymbolDecoratorArgs *args) {
+ std::string *message = static_cast<std::string *>(args->arg);
+ strncat(args->symbol_buf, message->c_str(),
+ args->symbol_buf_size - strlen(args->symbol_buf) - 1);
+}
+
+TEST(Symbolize, InstallAndRemoveSymbolDecorators) {
+ int ticket_a;
+ std::string a_message("a");
+ EXPECT_GE(ticket_a = absl::debugging_internal::InstallSymbolDecorator(
+ DummySymbolDecorator, &a_message),
+ 0);
+
+ int ticket_b;
+ std::string b_message("b");
+ EXPECT_GE(ticket_b = absl::debugging_internal::InstallSymbolDecorator(
+ DummySymbolDecorator, &b_message),
+ 0);
+
+ int ticket_c;
+ std::string c_message("c");
+ EXPECT_GE(ticket_c = absl::debugging_internal::InstallSymbolDecorator(
+ DummySymbolDecorator, &c_message),
+ 0);
+
+ char *address = reinterpret_cast<char *>(1);
+ EXPECT_STREQ("abc", TrySymbolize(address++));
+
+ EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_b));
+
+ EXPECT_STREQ("ac", TrySymbolize(address++));
+
+ // Cleanup: remove all remaining decorators so other stack traces don't
+ // get mystery "ac" decoration.
+ EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_a));
+ EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_c));
+}
+
+// Some versions of Clang with optimizations enabled seem to be able
+// to optimize away the .data section if no variables live in the
+// section. This variable should get placed in the .data section, and
+// the test below checks for the existence of a .data section.
+static int in_data_section = 1;
+
+TEST(Symbolize, ForEachSection) {
+ int fd = TEMP_FAILURE_RETRY(open("/proc/self/exe", O_RDONLY));
+ ASSERT_NE(fd, -1);
+
+ std::vector<std::string> sections;
+ ASSERT_TRUE(absl::debugging_internal::ForEachSection(
+      fd, [&sections](const std::string &name, const ElfW(Shdr) &) {
+ sections.push_back(name);
+ return true;
+ }));
+
+ // Check for the presence of common section names.
+ EXPECT_THAT(sections, Contains(".text"));
+ EXPECT_THAT(sections, Contains(".rodata"));
+ EXPECT_THAT(sections, Contains(".bss"));
+ ++in_data_section;
+ EXPECT_THAT(sections, Contains(".data"));
+
+ close(fd);
+}
+
+// x86-specific tests. Uses some inline assembly.
+extern "C" {
+inline void *ABSL_ATTRIBUTE_ALWAYS_INLINE inline_func() {
+ void *pc = nullptr;
+#if defined(__i386__)
+ __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [ PC ] "=r"(pc));
+#elif defined(__x86_64__)
+ __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [ PC ] "=r"(pc));
+#endif
+ return pc;
+}
+
+void *ABSL_ATTRIBUTE_NOINLINE non_inline_func() {
+ void *pc = nullptr;
+#if defined(__i386__)
+ __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [ PC ] "=r"(pc));
+#elif defined(__x86_64__)
+ __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [ PC ] "=r"(pc));
+#endif
+ return pc;
+}
+
+void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideNonInlineFunction() {
+#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE) && \
+ (defined(__i386__) || defined(__x86_64__))
+ void *pc = non_inline_func();
+ const char *symbol = TrySymbolize(pc);
+ ABSL_RAW_CHECK(symbol != nullptr, "TestWithPCInsideNonInlineFunction failed");
+ ABSL_RAW_CHECK(strcmp(symbol, "non_inline_func") == 0,
+ "TestWithPCInsideNonInlineFunction failed");
+ std::cout << "TestWithPCInsideNonInlineFunction passed" << std::endl;
+#endif
+}
+
+void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() {
+#if defined(ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE) && \
+ (defined(__i386__) || defined(__x86_64__))
+ void *pc = inline_func(); // Must be inlined.
+ const char *symbol = TrySymbolize(pc);
+ ABSL_RAW_CHECK(symbol != nullptr, "TestWithPCInsideInlineFunction failed");
+ ABSL_RAW_CHECK(strcmp(symbol, __FUNCTION__) == 0,
+ "TestWithPCInsideInlineFunction failed");
+ std::cout << "TestWithPCInsideInlineFunction passed" << std::endl;
+#endif
+}
+}
+
+// Test with a return address.
+void ABSL_ATTRIBUTE_NOINLINE TestWithReturnAddress() {
+#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
+ void *return_address = __builtin_return_address(0);
+ const char *symbol = TrySymbolize(return_address);
+ ABSL_RAW_CHECK(symbol != nullptr, "TestWithReturnAddress failed");
+ ABSL_RAW_CHECK(strcmp(symbol, "main") == 0, "TestWithReturnAddress failed");
+ std::cout << "TestWithReturnAddress passed" << std::endl;
+#endif
+}
+
+#elif defined(_WIN32) && defined(_DEBUG)
+
+TEST(Symbolize, Basics) {
+ EXPECT_STREQ("nonstatic_func", TrySymbolize((void *)(&nonstatic_func)));
+
+ // The name of an internal linkage symbol is not specified; allow either a
+ // mangled or an unmangled name here.
+ const char* static_func_symbol = TrySymbolize((void *)(&static_func));
+ ASSERT_TRUE(static_func_symbol != nullptr);
+ EXPECT_TRUE(strstr(static_func_symbol, "static_func") != nullptr);
+
+ EXPECT_TRUE(nullptr == TrySymbolize(nullptr));
+}
+
+TEST(Symbolize, Truncation) {
+ constexpr char kNonStaticFunc[] = "nonstatic_func";
+ EXPECT_STREQ("nonstatic_func",
+ TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ strlen(kNonStaticFunc) + 1));
+ EXPECT_STREQ("nonstatic_...",
+ TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ strlen(kNonStaticFunc) + 0));
+ EXPECT_STREQ("nonstatic...",
+ TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ strlen(kNonStaticFunc) - 1));
+ EXPECT_STREQ("n...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 5));
+ EXPECT_STREQ("...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 4));
+ EXPECT_STREQ("..", TrySymbolizeWithLimit((void *)(&nonstatic_func), 3));
+ EXPECT_STREQ(".", TrySymbolizeWithLimit((void *)(&nonstatic_func), 2));
+ EXPECT_STREQ("", TrySymbolizeWithLimit((void *)(&nonstatic_func), 1));
+ EXPECT_EQ(nullptr, TrySymbolizeWithLimit((void *)(&nonstatic_func), 0));
+}
+
+TEST(Symbolize, SymbolizeWithDemangling) {
+ const char* result = TrySymbolize((void *)(&Foo::func));
+ ASSERT_TRUE(result != nullptr);
+ EXPECT_TRUE(strstr(result, "Foo::func") != nullptr) << result;
+}
+
+#else // Symbolizer unimplemented
+
+TEST(Symbolize, Unimplemented) {
+ char buf[64];
+ EXPECT_FALSE(absl::Symbolize((void *)(&nonstatic_func), buf, sizeof(buf)));
+ EXPECT_FALSE(absl::Symbolize((void *)(&static_func), buf, sizeof(buf)));
+ EXPECT_FALSE(absl::Symbolize((void *)(&Foo::func), buf, sizeof(buf)));
+}
+
+#endif
+
+int main(int argc, char **argv) {
+ // Make sure kHpageTextPadding is linked into the binary.
+ if (volatile_bool) {
+ ABSL_RAW_LOG(INFO, "%s", kHpageTextPadding);
+ }
+
+#if ABSL_PER_THREAD_TLS
+ // Touch the per-thread variables.
+ symbolize_test_thread_small[0] = 0;
+ symbolize_test_thread_big[0] = 0;
+#endif
+
+ absl::InitializeSymbolizer(argv[0]);
+ testing::InitGoogleTest(&argc, argv);
+
+#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
+ TestWithPCInsideInlineFunction();
+ TestWithPCInsideNonInlineFunction();
+ TestWithReturnAddress();
+#endif
+
+ return RUN_ALL_TESTS();
+}
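The decorator hooks covered by InstallAndRemoveSymbolDecorators above can also be installed
outside of tests. A hedged sketch mirroring DummySymbolDecorator follows; the "@tag" suffix
and the function names are illustrative, and it assumes a build where the ELF symbolizer's
decorator support is available through "absl/debugging/symbolize.h", as in the test above:

  #include <cstring>

  #include "absl/debugging/symbolize.h"

  // Appends a fixed suffix to whatever name the symbolizer already produced,
  // staying within the buffer size the symbolizer passes in.
  static void TagDecorator(
      const absl::debugging_internal::SymbolDecoratorArgs *args) {
    strncat(args->symbol_buf, "@tag",
            args->symbol_buf_size - strlen(args->symbol_buf) - 1);
  }

  void WithTaggedSymbols() {
    int ticket = absl::debugging_internal::InstallSymbolDecorator(TagDecorator,
                                                                  nullptr);
    if (ticket >= 0) {
      // Symbolized names now end in "@tag" until the decorator is removed.
      absl::debugging_internal::RemoveSymbolDecorator(ticket);
    }
  }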
diff --git a/absl/debugging/symbolize_unimplemented.inc b/absl/debugging/symbolize_unimplemented.inc
new file mode 100644
index 0000000..7c580fe
--- /dev/null
+++ b/absl/debugging/symbolize_unimplemented.inc
@@ -0,0 +1,38 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+
+namespace debugging_internal {
+
+int InstallSymbolDecorator(SymbolDecorator, void*) { return -1; }
+bool RemoveSymbolDecorator(int) { return false; }
+bool RemoveAllSymbolDecorators(void) { return false; }
+bool RegisterFileMappingHint(const void *, const void *, uint64_t, const char *) {
+ return false;
+}
+bool GetFileMappingHint(const void **, const void **, uint64_t *, const char **) {
+ return false;
+}
+
+} // namespace debugging_internal
+
+void InitializeSymbolizer(const char*) {}
+bool Symbolize(const void *, char *, int) { return false; }
+
+} // namespace absl
diff --git a/absl/debugging/symbolize_win32.inc b/absl/debugging/symbolize_win32.inc
new file mode 100644
index 0000000..5a55f29
--- /dev/null
+++ b/absl/debugging/symbolize_win32.inc
@@ -0,0 +1,80 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See "Retrieving Symbol Information by Address":
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680578(v=vs.85).aspx
+
+#include <windows.h>
+
+// MSVC header DbgHelp.h has a warning for an ignored typedef.
+#pragma warning(push)
+#pragma warning(disable:4091)
+#include <DbgHelp.h>
+#pragma warning(pop)
+
+#pragma comment(lib, "dbghelp.lib")
+
+#include <algorithm>
+#include <cstring>
+#include <type_traits>  // std::aligned_storage, used by Symbolize() below
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+
+static HANDLE process = NULL;
+
+void InitializeSymbolizer(const char *argv0) {
+ if (process != nullptr) {
+ return;
+ }
+ process = GetCurrentProcess();
+
+ // Symbols are not loaded until a reference is made requiring the
+ // symbols be loaded. This is the fastest, most efficient way to use
+ // the symbol handler.
+ SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME);
+ if (!SymInitialize(process, nullptr, true)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long long error{GetLastError()}; // NOLINT(runtime/int)
+ ABSL_RAW_LOG(FATAL, "SymInitialize() failed: %llu", error);
+ }
+}
+
+bool Symbolize(const void *pc, char *out, int out_size) {
+ if (out_size <= 0) {
+ return false;
+ }
+ std::aligned_storage<sizeof(SYMBOL_INFO) + MAX_SYM_NAME,
+ alignof(SYMBOL_INFO)>::type buf;
+ SYMBOL_INFO *symbol = reinterpret_cast<SYMBOL_INFO *>(&buf);
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ if (!SymFromAddr(process, reinterpret_cast<DWORD64>(pc), nullptr, symbol)) {
+ return false;
+ }
+ strncpy(out, symbol->Name, out_size);
+ if (out[out_size - 1] != '\0') {
+ // strncpy() does not '\0' terminate when it truncates.
+ static constexpr char kEllipsis[] = "...";
+ int ellipsis_size =
+ std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+ memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+ out[out_size - 1] = '\0';
+ }
+ return true;
+}
+
+} // namespace absl
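Both the ELF implementation earlier in this patch and the Win32 implementation above share the
same truncation contract: when the output buffer is too small, the tail of the name is replaced
with "..." and the result stays NUL-terminated. A caller-side sketch, reusing the hypothetical
MyFunction from the first sketch and an arbitrary 8-byte buffer:

  void ShowTruncation() {
    char name[8];
    if (absl::Symbolize((void *)(&MyFunction), name, sizeof(name))) {
      // "MyFunction()" does not fit in 8 bytes, so name holds a truncated,
      // NUL-terminated string ending in "...", e.g. "MyFu...".
    }
  }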