// Copyright 2000 - 2007 Google Inc.
// All rights reserved.
//
// Author: Sanjay Ghemawat
//
// Portable implementation - just use glibc
//
// Note: The glibc implementation may cause a call to malloc.
// This can cause a deadlock in HeapProfiler.

#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_

#include <execinfo.h>
#include <atomic>
#include <cstring>

#include "absl/debugging/stacktrace.h"
#include "absl/base/attributes.h"

// Sometimes we can try to get a stack trace from within a stack trace,
// because we don't block signals inside this code (doing so would be too
// expensive: the two extra system calls per stack trace matter here).
// That can cause a self-deadlock.
// Protect against such reentrant calls by failing to get a stack trace.
//
// We use __thread here because this code is extremely low level -- it is
// called while collecting stack traces from within malloc and mmap, and thus
// cannot call anything that might itself call malloc or mmap.
static __thread int recursive = 0;
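// As a concrete illustration (a hypothetical chain, consistent with the
// HeapProfiler note in the file header): a malloc hook asks for a stack
// trace, backtrace() allocates on its first use, that allocation re-enters
// the hook, and the hook asks for another stack trace. With this guard,
// the nested request (checked in UnwindImpl below) returns zero frames
// instead of deadlocking.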

// The stack trace function might be invoked very early in the program's
// execution (e.g. from the very first malloc if using tcmalloc). Also, the
// glibc implementation itself will trigger malloc the first time it is called.
// As such, we suppress usage of backtrace during this early stage of execution.
static std::atomic<bool> disable_stacktraces(true);  // Disabled until healthy.
// Waiting until static initializers run seems to be late enough.
// This file is included into stacktrace.cc so this will only run once.
ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
  void* unused_stack[1];
  // Force the first backtrace to happen early to get the one-time shared lib
  // loading (allocation) out of the way. After the first call it is much safer
  // to use backtrace from a signal handler if we crash somewhere later.
  backtrace(unused_stack, 1);
  disable_stacktraces.store(false, std::memory_order_relaxed);
  return 0;
}();

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
  if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
    return 0;
  }
  ++recursive;

  static_cast<void>(ucp);  // Unused.
  static const int kStackLength = 64;
  void * stack[kStackLength];
  int size;

  size = backtrace(stack, kStackLength);
  skip_count++;  // we want to skip the current frame as well
  // Clamp the number of frames we return to the range [0, max_depth].
  int result_count = size - skip_count;
  if (result_count < 0)
    result_count = 0;
  if (result_count > max_depth)
    result_count = max_depth;
  for (int i = 0; i < result_count; i++)
    result[i] = stack[i + skip_count];

  if (IS_STACK_FRAMES) {
    // No implementation for finding out the stack frame sizes yet.
    memset(sizes, 0, sizeof(*sizes) * result_count);
  }
  if (min_dropped_frames != nullptr) {
    if (size - skip_count - max_depth > 0) {
      *min_dropped_frames = size - skip_count - max_depth;
    } else {
      *min_dropped_frames = 0;
    }
  }

  --recursive;

  return result_count;
}
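
// A minimal usage sketch (an assumption about the caller, not something this
// file defines): stacktrace.cc includes this header and forwards the public
// entry points to UnwindImpl, picking the template parameters according to
// which overload was called, roughly like:
//
//   int GetStackTrace(void** result, int max_depth, int skip_count) {
//     return UnwindImpl<false, false>(result, nullptr, max_depth, skip_count,
//                                     nullptr, nullptr);
//   }
//   int GetStackFrames(void** result, int* sizes, int max_depth,
//                      int skip_count) {
//     return UnwindImpl<true, false>(result, sizes, max_depth, skip_count,
//                                    nullptr, nullptr);
//   }
//
// The skip_count supplied by the caller is relative to its own frame; the
// skip_count++ above accounts for UnwindImpl's frame itself.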

namespace absl {
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_