// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Portable implementation - just use glibc
//
// Note: The glibc implementation may cause a call to malloc.
// This can cause a deadlock in HeapProfiler.
| 19 | |
| 20 | #ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ |
| 21 | #define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ |
| 22 | |
| 23 | #include <execinfo.h> |
| 24 | #include <atomic> |
| 25 | #include <cstring> |
| 26 | |
| 27 | #include "absl/debugging/stacktrace.h" |
| 28 | #include "absl/base/attributes.h" |
| 29 | |
// Sometimes, we can try to get a stack trace from within a stack
// trace, because we don't block signals inside this code (which would be too
// expensive: the two extra system calls per stack trace do matter here).
// That can cause a self-deadlock.
// Protect against such reentrant call by failing to get a stack trace.
//
// We use __thread here because the code here is extremely low level -- it is
// called while collecting stack traces from within malloc and mmap, and thus
// can not call anything which might call malloc or mmap itself.
// (Plain `thread_local` could require a dynamic-initialization guard;
// __thread guarantees constant initialization.)
static __thread int recursive = 0;

// The stack trace function might be invoked very early in the program's
// execution (e.g. from the very first malloc if using tcmalloc). Also, the
// glibc implementation itself will trigger malloc the first time it is called.
// As such, we suppress usage of backtrace during this early stage of execution.
static std::atomic<bool> disable_stacktraces(true);  // Disabled until healthy.
// Waiting until static initializers run seems to be late enough.
// This file is included into stacktrace.cc so this will only run once.
// The immediately-invoked lambda below runs during static initialization.
ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
  void* unused_stack[1];
  // Force the first backtrace to happen early to get the one-time shared lib
  // loading (allocation) out of the way. After the first call it is much safer
  // to use backtrace from a signal handler if we crash somewhere later.
  backtrace(unused_stack, 1);
  // Relaxed order suffices: readers only need to eventually observe `false`;
  // there is no data published alongside this flag.
  disable_stacktraces.store(false, std::memory_order_relaxed);
  return 0;
}();
| 57 | |
| 58 | template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT> |
| 59 | static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, |
| 60 | const void *ucp, int *min_dropped_frames) { |
| 61 | if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) { |
| 62 | return 0; |
| 63 | } |
| 64 | ++recursive; |
| 65 | |
| 66 | static_cast<void>(ucp); // Unused. |
| 67 | static const int kStackLength = 64; |
| 68 | void * stack[kStackLength]; |
| 69 | int size; |
| 70 | |
| 71 | size = backtrace(stack, kStackLength); |
| 72 | skip_count++; // we want to skip the current frame as well |
| 73 | int result_count = size - skip_count; |
| 74 | if (result_count < 0) |
| 75 | result_count = 0; |
| 76 | if (result_count > max_depth) |
| 77 | result_count = max_depth; |
| 78 | for (int i = 0; i < result_count; i++) |
| 79 | result[i] = stack[i + skip_count]; |
| 80 | |
| 81 | if (IS_STACK_FRAMES) { |
| 82 | // No implementation for finding out the stack frame sizes yet. |
| 83 | memset(sizes, 0, sizeof(*sizes) * result_count); |
| 84 | } |
| 85 | if (min_dropped_frames != nullptr) { |
| 86 | if (size - skip_count - max_depth > 0) { |
| 87 | *min_dropped_frames = size - skip_count - max_depth; |
| 88 | } else { |
| 89 | *min_dropped_frames = 0; |
| 90 | } |
| 91 | } |
| 92 | |
| 93 | --recursive; |
| 94 | |
| 95 | return result_count; |
| 96 | } |
| 97 | |
| 98 | namespace absl { |
Austin Schuh | b4691e9 | 2020-12-31 12:37:18 -0800 | [diff] [blame^] | 99 | ABSL_NAMESPACE_BEGIN |
Austin Schuh | 36244a1 | 2019-09-21 17:52:38 -0700 | [diff] [blame] | 100 | namespace debugging_internal { |
| 101 | bool StackTraceWorksForTest() { |
| 102 | return true; |
| 103 | } |
| 104 | } // namespace debugging_internal |
Austin Schuh | b4691e9 | 2020-12-31 12:37:18 -0800 | [diff] [blame^] | 105 | ABSL_NAMESPACE_END |
Austin Schuh | 36244a1 | 2019-09-21 17:52:38 -0700 | [diff] [blame] | 106 | } // namespace absl |
| 107 | |
| 108 | #endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ |