blob: 7ed6b3eb82dc2f039ac08531c0b17eb1bb976ed9 [file] [log] [blame]
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_

// Generate stack tracer for aarch64

#if defined(__linux__)
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>
#endif

#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>

#include "absl/base/attributes.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"
// Sentinel returned by ComputeStackFrameSize() below when a frame's size
// cannot be determined (i.e. the candidate frame pointers are not ordered
// low < high).
static const uintptr_t kUnknownFrameSize = 0;
23
24#if defined(__linux__)
25// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
26static const unsigned char* GetKernelRtSigreturnAddress() {
27 constexpr uintptr_t kImpossibleAddress = 1;
28 ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
29 uintptr_t address = memoized.load(std::memory_order_relaxed);
30 if (address != kImpossibleAddress) {
31 return reinterpret_cast<const unsigned char*>(address);
32 }
33
34 address = reinterpret_cast<uintptr_t>(nullptr);
35
36#ifdef ABSL_HAVE_VDSO_SUPPORT
37 absl::debugging_internal::VDSOSupport vdso;
38 if (vdso.IsPresent()) {
39 absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
40 if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC,
41 &symbol_info) ||
42 symbol_info.address == nullptr) {
43 // Unexpected: VDSO is present, yet the expected symbol is missing
44 // or null.
45 assert(false && "VDSO is present, but doesn't have expected symbol");
46 } else {
47 if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
48 kImpossibleAddress) {
49 address = reinterpret_cast<uintptr_t>(symbol_info.address);
50 } else {
51 assert(false && "VDSO returned invalid address");
52 }
53 }
54 }
55#endif
56
57 memoized.store(address, std::memory_order_relaxed);
58 return reinterpret_cast<const unsigned char*>(address);
59}
60#endif // __linux__
61
62// Compute the size of a stack frame in [low..high). We assume that
63// low < high. Return size of kUnknownFrameSize.
64template<typename T>
65static inline uintptr_t ComputeStackFrameSize(const T* low,
66 const T* high) {
67 const char* low_char_ptr = reinterpret_cast<const char *>(low);
68 const char* high_char_ptr = reinterpret_cast<const char *>(high);
69 return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
70}
71
// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
//
// WITH_CONTEXT enables the signal-frame detour below: when the next frame's
// return address is __kernel_rt_sigreturn, the pre-signal frame pointer is
// recovered from the supplied ucontext instead of the on-stack chain.
// "uc" may be null; it is only consulted when WITH_CONTEXT is set.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
  // On aarch64 the saved caller frame pointer is the first word of the frame
  // record, so the next frame is simply *old_frame_pointer.
  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
  bool check_frame_size = true;

#if defined(__linux__)
  if (WITH_CONTEXT && uc != nullptr) {
    // Check to see if next frame's return address is __kernel_rt_sigreturn.
    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // old_frame_pointer[0] is not suitable for unwinding, look at
      // ucontext to discover frame pointer before signal.  regs[29] is x29,
      // the AArch64 frame-pointer register.
      void **const pre_signal_frame_pointer =
          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);

      // Check that alleged frame pointer is actually readable. This is to
      // prevent "double fault" in case we hit the first fault due to e.g.
      // stack corruption.
      if (!absl::debugging_internal::AddressIsReadable(
              pre_signal_frame_pointer))
        return nullptr;

      // Alleged frame pointer is readable, use it for further unwinding.
      new_frame_pointer = pre_signal_frame_pointer;

      // Skip frame size check if we return from a signal. We may be using
      // an alternate stack for signals, so the apparent frame size could be
      // arbitrarily large.
      check_frame_size = false;
    }
  }
#endif

  // aarch64 ABI requires stack pointer to be 16-byte-aligned.
  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
    return nullptr;

  // Check frame size. In strict mode, we assume frames to be under
  // 100,000 bytes. In non-strict mode, we relax the limit to 1MB.
  if (check_frame_size) {
    const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
    const uintptr_t frame_size =
        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
    // kUnknownFrameSize means the chain is not monotonically increasing,
    // which a valid downward-growing stack should produce.
    if (frame_size == kUnknownFrameSize || frame_size > max_size)
      return nullptr;
  }

  return new_frame_pointer;
}
124
// Walks the frame-pointer chain starting at this function's own frame and
// stores up to max_depth return addresses into result[], after discarding
// skip_count caller frames (plus this function's own frame).  When
// IS_STACK_FRAMES is set, sizes[n] receives the estimated byte size of the
// frame for result[n].  When IS_WITH_CONTEXT is set, ucp may point to a
// ucontext_t that lets NextStackFrame() unwind across a signal frame.  If
// min_dropped_frames is non-null it receives the number of additional frames
// found beyond max_depth (counted up to a clamp of 200).  Returns the number
// of entries written to result[].
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
#ifdef __GNUC__
  void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
# error reading stack point not yet supported on this platform.
#endif

  skip_count++; // Skip the frame for this function.
  int n = 0;

  // The frame pointer points to low address of a frame.  The first 64-bit
  // word of a frame points to the next frame up the call chain, which normally
  // is just after the high address of the current frame.  The second word of
  // a frame contains the return address to the caller.  To find a pc value
  // associated with the current frame, we need to go down a level in the call
  // chain.  So we remember the return address of the last frame seen.  This
  // does not work for the first stack frame, which belongs to UnwindImpl(),
  // but we skip the frame for UnwindImpl() anyway.
  void* prev_return_address = nullptr;

  while (frame_pointer && n < max_depth) {
    // The absl::GetStackFrames routine is called when we are in some
    // informational context (the failure signal handler for example).
    // Use the non-strict unwinding rules to produce a stack trace
    // that is as complete as possible (even if it contains a few bogus
    // entries in some rare cases).
    void **next_frame_pointer =
        NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);

    if (skip_count > 0) {
      skip_count--;
    } else {
      // Record the *caller's* pc for this frame (see comment above).
      result[n] = prev_return_address;
      if (IS_STACK_FRAMES) {
        sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
      }
      n++;
    }
    // frame_pointer[1] is the saved link register: the return address into
    // the caller, which becomes the pc we report for the next frame.
    prev_return_address = frame_pointer[1];
    frame_pointer = next_frame_pointer;
  }
  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the max of frames we are willing to
    // count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 200;
    int j = 0;
    for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
      frame_pointer =
          NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
    }
    *min_dropped_frames = j;
  }
  return n;
}
181
namespace absl {
namespace debugging_internal {
// The frame-pointer based unwinder above is expected to function on
// aarch64, so stack-trace tests may rely on traces being produced.
bool StackTraceWorksForTest() { return true; }
}  // namespace debugging_internal
}  // namespace absl
189
#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_