// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Produce stack trace. I'm guessing (hoping!) the code is much like
// for x86. For apple machines, at least, it seems to be; see
// https://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html
// https://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK
// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882

#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_

#if defined(__linux__)
#include <asm/ptrace.h>  // for PT_NIP.
#include <ucontext.h>  // for ucontext_t
#endif

#include <unistd.h>
#include <cassert>
#include <cstdint>
#include <cstdio>

#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems

// Given a stack pointer, return the saved link register value.
// Note that this is the link register for a callee.
static inline void *StacktracePowerPCGetLR(void **sp) {
  // PowerPC has 3 main ABIs, which say where in the stack the
  // Link Register is. For DARWIN and AIX (used by apple and
  // linux ppc64), it's in sp[2]. For SYSV (used by linux ppc),
  // it's in sp[1].
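  // Roughly, both layouts reserve a header at the low-address end of each
  // frame: sp[0] holds the back chain (the caller's stack pointer), and the
  // LR save slot sits two words in for AIX/Darwin-style frames (back chain,
  // CR save, LR save) or one word in for SYSV (back chain, LR save).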
#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
  return *(sp+2);
#elif defined(_CALL_SYSV)
  return *(sp+1);
#elif defined(__APPLE__) || defined(__FreeBSD__) || \
    (defined(__linux__) && defined(__PPC64__))
  // This check is in case the compiler doesn't define _CALL_AIX/etc.
  return *(sp+2);
#elif defined(__linux)
  // This check is in case the compiler doesn't define _CALL_SYSV.
  return *(sp+1);
#else
#error Need to specify the PPC ABI for your architecture.
#endif
}

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_sp, const void *uc) {
  void **new_sp = (void **) *old_sp;
  enum { kStackAlignment = 16 };
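  // The PowerPC ABIs handled here keep the stack pointer 16-byte aligned,
  // so a candidate frame pointer that is not 16-byte aligned is rejected
  // as bogus below.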

  // Check that the transition from frame pointer old_sp to frame
  // pointer new_sp isn't clearly bogus.
  if (STRICT_UNWINDING) {
    // With the stack growing downwards, an older stack frame must be
    // at a greater address than the current one.
    if (new_sp <= old_sp) return nullptr;
    // Assume stack frames larger than 100,000 bytes are bogus.
    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
  } else {
    // In the non-strict mode, allow discontiguous stack frames
    // (alternate-signal-stacks, for example).
    if (new_sp == old_sp) return nullptr;
    // And allow frames up to about 1MB.
    if ((new_sp > old_sp)
        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
  }
  if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr;

#if defined(__linux__)
  enum StackTraceKernelSymbolStatus {
      kNotInitialized = 0, kAddressValid, kAddressInvalid };

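  // Detect the frame the kernel pushes for a signal handler: its LR save
  // slot holds the address of the vDSO signal trampoline
  // (__kernel_sigtramp_rt64). When that frame is found, the pre-signal
  // stack pointer is recovered from the signal context (gp_regs[PT_R1]) so
  // unwinding can continue into the interrupted code.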
  if (IS_WITH_CONTEXT && uc != nullptr) {
    static StackTraceKernelSymbolStatus kernel_symbol_status =
        kNotInitialized;  // Sentinel: not computed yet.
    // Initialize with a sentinel value: __kernel_sigtramp_rt64 cannot
    // possibly be there.
    static const unsigned char *kernel_sigtramp_rt64_address = nullptr;
    if (kernel_symbol_status == kNotInitialized) {
      absl::debugging_internal::VDSOSupport vdso;
      if (vdso.IsPresent()) {
        absl::debugging_internal::VDSOSupport::SymbolInfo
            sigtramp_rt64_symbol_info;
        if (!vdso.LookupSymbol(
                "__kernel_sigtramp_rt64", "LINUX_2.6.15",
                absl::debugging_internal::VDSOSupport::kVDSOSymbolType,
                &sigtramp_rt64_symbol_info) ||
            sigtramp_rt64_symbol_info.address == nullptr) {
          // Unexpected: VDSO is present, yet the expected symbol is missing
          // or null.
          assert(false && "VDSO is present, but doesn't have expected symbol");
          kernel_symbol_status = kAddressInvalid;
        } else {
          kernel_sigtramp_rt64_address =
              reinterpret_cast<const unsigned char *>(
                  sigtramp_rt64_symbol_info.address);
          kernel_symbol_status = kAddressValid;
        }
      } else {
        kernel_symbol_status = kAddressInvalid;
      }
    }

    if (new_sp != nullptr &&
        kernel_symbol_status == kAddressValid &&
        StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) {
      const ucontext_t* signal_context =
          reinterpret_cast<const ucontext_t*>(uc);
      void **const sp_before_signal =
          reinterpret_cast<void**>(signal_context->uc_mcontext.gp_regs[PT_R1]);
      // Check that alleged sp before signal is nonnull and is reasonably
      // aligned.
      if (sp_before_signal != nullptr &&
          ((uintptr_t)sp_before_signal % kStackAlignment) == 0) {
        // Check that alleged stack pointer is actually readable. This is to
        // prevent a "double fault" in case we hit the first fault due to e.g.
        // a stack corruption.
        if (absl::debugging_internal::AddressIsReadable(sp_before_signal)) {
          // Alleged stack pointer is readable, use it for further unwinding.
          new_sp = sp_before_signal;
        }
      }
    }
  }
#endif

  return new_sp;
}

// This ensures that absl::GetStackTrace sets up the Link Register properly.
ABSL_ATTRIBUTE_NOINLINE static void AbslStacktracePowerPCDummyFunction() {
  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
  void **sp;
  // Apple macOS uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
  // and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a
  // different asm syntax. I don't know quite the best way to discriminate
  // systems using the old as from the new one; I've gone with __APPLE__.
#ifdef __APPLE__
  __asm__ volatile ("mr %0,r1" : "=r" (sp));
#else
  __asm__ volatile ("mr %0,1" : "=r" (sp));
#endif
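  // (On PowerPC, r1 is the stack pointer register, so sp now points at this
  // function's current stack frame.)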

  // On PowerPC, the "Link Register" (LR) holds the return address of a
  // subroutine call (the instruction we resume at after our function
  // finishes), and the ABI reserves a slot in the caller's stack frame
  // where the callee saves it. That saved return address is what we want
  // here. While the compiler will always(?) set up LR for subroutine
  // calls, it may not save it to the stack for leaf functions (such as
  // this one). This routine forces the compiler (at least gcc) to push
  // it anyway.
  AbslStacktracePowerPCDummyFunction();

  // The LR save area is used by the callee, so the top entry is bogus.
  skip_count++;

  int n = 0;

  // Unlike the ABIs of X86 and ARM, the PowerPC ABIs say that the return
  // address (in the link register) of a function call is stored in the
  // caller's stack frame instead of the callee's. When we look for the
  // return address associated with a stack frame, we need to make sure
  // that there is a caller frame before it. So we call NextStackFrame
  // before entering the loop below and check next_sp instead of sp for
  // loop termination. The outermost frame is set up by the runtime and
  // does not have a caller frame, so it is skipped.

  // The absl::GetStackFrames routine is called when we are in some
  // informational context (the failure signal handler for example).
  // Use the non-strict unwinding rules to produce a stack trace
  // that is as complete as possible (even if it contains a few
  // bogus entries in some rare cases).
  void **next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);

  while (next_sp && n < max_depth) {
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = StacktracePowerPCGetLR(sp);
      if (IS_STACK_FRAMES) {
        if (next_sp > sp) {
          sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
        } else {
          // A frame-size of 0 is used to indicate unknown frame size.
          sizes[n] = 0;
        }
      }
      n++;
    }

    sp = next_sp;
    next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
  }

  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 1000;
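    // Because the loop stops at kMaxUnwind, the count below is only a lower
    // bound on the number of dropped frames (hence "min_dropped_frames").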
    int j = 0;
    for (; next_sp != nullptr && j < kMaxUnwind; j++) {
      next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
    }
    *min_dropped_frames = j;
  }
  return n;
}

namespace absl {
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_