#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_

// Generate stack tracer for aarch64

#if defined(__linux__)
#include <signal.h>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>
#endif

#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"

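// Returned by ComputeStackFrameSize() when the frame size cannot be
// determined.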
static const size_t kUnknownFrameSize = 0;
// Stack end to use when we don't know the actual stack end
// (effectively just the end of address space).
constexpr uintptr_t kUnknownStackEnd =
    std::numeric_limits<size_t>::max() - sizeof(void *);

#if defined(__linux__)
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
static const unsigned char* GetKernelRtSigreturnAddress() {
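  // 1 can never be a valid code address, so it serves as the "not yet looked
  // up" sentinel for the memoized result below.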
  constexpr uintptr_t kImpossibleAddress = 1;
  ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
  uintptr_t address = memoized.load(std::memory_order_relaxed);
  if (address != kImpossibleAddress) {
    return reinterpret_cast<const unsigned char*>(address);
  }

  address = reinterpret_cast<uintptr_t>(nullptr);

#ifdef ABSL_HAVE_VDSO_SUPPORT
  absl::debugging_internal::VDSOSupport vdso;
  if (vdso.IsPresent()) {
    absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
    auto lookup = [&](int type) {
      return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
                               &symbol_info);
    };
    if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
        symbol_info.address == nullptr) {
      // Unexpected: VDSO is present, yet the expected symbol is missing
      // or null.
      assert(false && "VDSO is present, but doesn't have expected symbol");
    } else {
      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
          kImpossibleAddress) {
        address = reinterpret_cast<uintptr_t>(symbol_info.address);
      } else {
        assert(false && "VDSO returned invalid address");
      }
    }
  }
#endif

  memoized.store(address, std::memory_order_relaxed);
  return reinterpret_cast<const unsigned char*>(address);
}
#endif  // __linux__

// Compute the size of a stack frame in [low..high).  Returns
// kUnknownFrameSize if the frame size cannot be determined (i.e. low >= high).
template<typename T>
static size_t ComputeStackFrameSize(const T* low,
                                    const T* high) {
  const char* low_char_ptr = reinterpret_cast<const char *>(low);
  const char* high_char_ptr = reinterpret_cast<const char *>(high);
  return low < high ? static_cast<size_t>(high_char_ptr - low_char_ptr)
                    : kUnknownFrameSize;
}

// Saves stack info that is expensive to calculate to avoid recalculating
// per frame.
struct StackInfo {
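  // [stack_low, stack_high) bounds the thread's stack and
  // [sig_stack_low, sig_stack_high) bounds the alternate signal stack, if
  // any.  A *_high value of kUnknownStackEnd means that bound is unknown.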
  uintptr_t stack_low;
  uintptr_t stack_high;
  uintptr_t sig_stack_low;
  uintptr_t sig_stack_high;
};

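// Returns whether `ptr` lies within the alternate signal stack recorded in
// `stack_info`.  Returns false when the signal stack bounds are unknown.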
static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
  uintptr_t comparable_ptr = reinterpret_cast<uintptr_t>(ptr);
  if (stack_info->sig_stack_high == kUnknownStackEnd)
    return false;
  return (comparable_ptr >= stack_info->sig_stack_low &&
          comparable_ptr < stack_info->sig_stack_high);
}

// Given a pointer to a stack frame, locate and return the calling
// stack frame, or return null if no stack frame can be found. Perform
// sanity checks (the strictness of which is controlled by the boolean
// parameter "STRICT_UNWINDING") to reduce the chance that a bad pointer is
// returned.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY  // May read random elements from stack.
static void **NextStackFrame(void **old_frame_pointer, const void *uc,
                             const StackInfo *stack_info) {
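  // On AArch64 a frame record is the pair {previous frame pointer, return
  // address (lr)}, so dereferencing the frame pointer yields a pointer to the
  // caller's frame record.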
  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);

#if defined(__linux__)
  if (WITH_CONTEXT && uc != nullptr) {
    // Check to see if next frame's return address is __kernel_rt_sigreturn.
    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // old_frame_pointer[0] is not suitable for unwinding, look at
      // ucontext to discover frame pointer before signal.
      void **const pre_signal_frame_pointer =
          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);

      // The most recent signal always needs special handling to find the frame
      // pointer, but a nested signal does not.  If pre_signal_frame_pointer is
      // earlier in the stack than the old_frame_pointer, then use it. If it is
      // later, then we have already unwound through it and it needs no special
      // handling.
      if (pre_signal_frame_pointer >= old_frame_pointer) {
        new_frame_pointer = pre_signal_frame_pointer;
      }
    }
  }
#endif

  // The frame pointer should be 8-byte aligned.
  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
    return nullptr;

  // Check that alleged frame pointer is actually readable. This is to
  // prevent "double fault" in case we hit the first fault due to e.g.
  // stack corruption.
  if (!absl::debugging_internal::AddressIsReadable(
          new_frame_pointer))
    return nullptr;

  // Only check the size if both frames are in the same stack.
  if (InsideSignalStack(new_frame_pointer, stack_info) ==
      InsideSignalStack(old_frame_pointer, stack_info)) {
    // Check frame size.  In strict mode, we assume frames to be under
    // 100,000 bytes.  In non-strict mode, we relax the limit to 1MB.
    const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
    const size_t frame_size =
        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
    if (frame_size == kUnknownFrameSize)
      return nullptr;
    // A very large frame may mean corrupt memory or an erroneous frame
    // pointer. But also maybe just a plain-old large frame.  Assume that if the
    // frame is within a known stack, then it is valid.
    if (frame_size > max_size) {
      size_t stack_low = stack_info->stack_low;
      size_t stack_high = stack_info->stack_high;
      if (InsideSignalStack(new_frame_pointer, stack_info)) {
        stack_low = stack_info->sig_stack_low;
        stack_high = stack_info->sig_stack_high;
      }
      if (stack_high < kUnknownStackEnd &&
          static_cast<size_t>(getpagesize()) < stack_low) {
        const uintptr_t new_fp_u =
            reinterpret_cast<uintptr_t>(new_frame_pointer);
        // Stack bounds are known.
        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
          // new_frame_pointer is not within a known stack.
          return nullptr;
        }
      } else {
        // Stack bounds are unknown, prefer truncated stack to possible crash.
        return nullptr;
      }
    }
  }

  return new_frame_pointer;
}

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
// We count on the bottom frame being this one. See the comment
// at prev_return_address.
ABSL_ATTRIBUTE_NOINLINE
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
#ifdef __GNUC__
  void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
# error reading the frame pointer is not yet supported on this platform.
#endif
  skip_count++;    // Skip the frame for this function.
  int n = 0;

  // Assume that the first page is not stack.
  StackInfo stack_info;
  stack_info.stack_low = static_cast<uintptr_t>(getpagesize());
  stack_info.stack_high = kUnknownStackEnd;
  stack_info.sig_stack_low = stack_info.stack_low;
  stack_info.sig_stack_high = kUnknownStackEnd;

  // The frame pointer points to the low address of a frame.  The first
  // 64-bit word of a frame points to the next frame up the call chain, which
  // normally is just after the high address of the current frame.  The second
  // word of a frame contains the return address to the caller.  To find a pc
  // value associated with the current frame, we need to go down a level in
  // the call chain.  So we remember the return address of the last frame
  // seen.  This does not work for the first stack frame, which belongs to
  // UnwindImpl(), but we skip the frame for UnwindImpl() anyway.
  void* prev_return_address = nullptr;
  // The nth frame size is the difference between the nth frame pointer and
  // the frame pointer below it in the call chain. There is no frame below the
  // leaf frame, but this function is the leaf anyway, and we skip it.
  void** prev_frame_pointer = nullptr;

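  // Walk the frame-pointer chain, recording a return address (and, when
  // IS_STACK_FRAMES is set, a frame size) for each frame once `skip_count`
  // frames have been skipped.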
  while (frame_pointer && n < max_depth) {
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = prev_return_address;
      if (IS_STACK_FRAMES) {
        sizes[n] = static_cast<int>(
            ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
      }
      n++;
    }
    prev_return_address = frame_pointer[1];
    prev_frame_pointer = frame_pointer;
    // The absl::GetStackFrames routine is called when we are in some
    // informational context (the failure signal handler for example).
    // Use the non-strict unwinding rules to produce a stack trace
    // that is as complete as possible (even if it contains a few bogus
    // entries in some rare cases).
    frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
        frame_pointer, ucp, &stack_info);
  }

  if (min_dropped_frames != nullptr) {
    // Implementation detail: we cap the number of frames we are willing to
    // count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 200;
    int num_dropped_frames = 0;
    for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
      if (skip_count > 0) {
        skip_count--;
      } else {
        num_dropped_frames++;
      }
      frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
          frame_pointer, ucp, &stack_info);
    }
    *min_dropped_frames = num_dropped_frames;
  }
  return n;
}

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_