/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fault_handler.h"

#include <string.h>
#include <sys/mman.h>
#include <sys/ucontext.h>

#include <atomic>

#include "art_method-inl.h"
#include "base/logging.h"  // For VLOG
#include "base/membarrier.h"
#include "base/stl_util.h"
#include "dex/dex_file_types.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class.h"
#include "mirror/object_reference.h"
#include "oat/oat_file.h"
#include "oat/oat_quick_method_header.h"
#include "sigchain.h"
#include "thread-current-inl.h"
#include "verify_object-inl.h"

namespace art HIDDEN {
// Static fault manager object accessed by signal handler.
FaultManager fault_manager;

// These need to be NO_INLINE since some debuggers do not read the inline-info to set a breakpoint
// if they aren't.
extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigsegv_fault() {
  // Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
  VLOG(signals) << "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
}
extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigbus_fault() {
  // Set a breakpoint here to be informed when a SIGBUS is unhandled by ART.
  VLOG(signals) << "Caught unknown SIGBUS in ART fault handler - chaining to next handler.";
}

// Signal handler called on SIGSEGV.
static bool art_sigsegv_handler(int sig, siginfo_t* info, void* context) {
  return fault_manager.HandleSigsegvFault(sig, info, context);
}

// Signal handler called on SIGBUS.
static bool art_sigbus_handler(int sig, siginfo_t* info, void* context) {
  return fault_manager.HandleSigbusFault(sig, info, context);
}

FaultManager::FaultManager()
    : generated_code_ranges_lock_("FaultHandler generated code ranges lock",
                                  LockLevel::kGenericBottomLock),
      initialized_(false) {}

FaultManager::~FaultManager() {
}

static const char* SignalCodeName(int sig, int code) {
  if (sig == SIGSEGV) {
    switch (code) {
      case SEGV_MAPERR: return "SEGV_MAPERR";
      case SEGV_ACCERR: return "SEGV_ACCERR";
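      // Raw si_code values for arm64 MTE faults (SEGV_MTEAERR / SEGV_MTESERR),
      // presumably spelled as literals because not every toolchain's headers
      // define these constants.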
      case 8:           return "SEGV_MTEAERR";
      case 9:           return "SEGV_MTESERR";
      default:          return "SEGV_UNKNOWN";
    }
  } else if (sig == SIGBUS) {
    switch (code) {
      case BUS_ADRALN: return "BUS_ADRALN";
      case BUS_ADRERR: return "BUS_ADRERR";
      case BUS_OBJERR: return "BUS_OBJERR";
      default:         return "BUS_UNKNOWN";
    }
  } else {
    return "UNKNOWN";
  }
}

static std::ostream& PrintSignalInfo(std::ostream& os, siginfo_t* info) {
  os << "  si_signo: " << info->si_signo << " (" << strsignal(info->si_signo) << ")\n"
     << "  si_code: " << info->si_code
     << " (" << SignalCodeName(info->si_signo, info->si_code) << ")";
  if (info->si_signo == SIGSEGV || info->si_signo == SIGBUS) {
    os << "\n" << "  si_addr: " << info->si_addr;
  }
  return os;
}

void FaultManager::Init(bool use_sig_chain) {
  CHECK(!initialized_);
  if (use_sig_chain) {
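    // Run the handler with (almost) all signals blocked, but keep SIGABRT and the
    // hardware fault signals unblocked so that a crash or abort raised inside the
    // handler itself can still be delivered instead of being silently blocked.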
    sigset_t mask;
    sigfillset(&mask);
    sigdelset(&mask, SIGABRT);
    sigdelset(&mask, SIGBUS);
    sigdelset(&mask, SIGFPE);
    sigdelset(&mask, SIGILL);
    sigdelset(&mask, SIGSEGV);

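    // Register "special handlers" with libsigchain. A special handler returns true
    // when it has fully handled the fault; returning false makes sigchain pass the
    // signal on to the next handler in the chain.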
    SigchainAction sa = {
        .sc_sigaction = art_sigsegv_handler,
        .sc_mask = mask,
        .sc_flags = 0UL,
    };

    AddSpecialSignalHandlerFn(SIGSEGV, &sa);
    if (gUseUserfaultfd) {
      sa.sc_sigaction = art_sigbus_handler;
      AddSpecialSignalHandlerFn(SIGBUS, &sa);
    }

    // Notify the kernel that we intend to use a specific `membarrier()` command.
    int result = art::membarrier(MembarrierCommand::kRegisterPrivateExpedited);
    if (result != 0) {
      LOG(WARNING) << "FaultHandler: MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED failed: "
                   << errno << " " << strerror(errno);
    }

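    // Thread the statically allocated range nodes into a singly linked free list;
    // `CreateGeneratedCodeRange()` hands these out before falling back to `new`.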
    {
      MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
      for (size_t i = 0; i != kNumLocalGeneratedCodeRanges; ++i) {
        GeneratedCodeRange* next = (i + 1u != kNumLocalGeneratedCodeRanges)
            ? &generated_code_ranges_storage_[i + 1u]
            : nullptr;
        generated_code_ranges_storage_[i].next.store(next, std::memory_order_relaxed);
        generated_code_ranges_storage_[i].start = nullptr;
        generated_code_ranges_storage_[i].size = 0u;
      }
      free_generated_code_ranges_ = generated_code_ranges_storage_;
    }

    initialized_ = true;
  } else if (gUseUserfaultfd) {
    struct sigaction act;
    std::memset(&act, '\0', sizeof(act));
    act.sa_flags = SA_SIGINFO | SA_RESTART;
    act.sa_sigaction = [](int sig, siginfo_t* info, void* context) {
      if (!art_sigbus_handler(sig, info, context)) {
        std::ostringstream oss;
        PrintSignalInfo(oss, info);
        LOG(FATAL) << "Couldn't handle SIGBUS fault:"
                   << "\n"
                   << oss.str();
      }
    };
    if (sigaction(SIGBUS, &act, nullptr)) {
      LOG(FATAL) << "Fault handler for SIGBUS couldn't be set up: " << strerror(errno);
    }
  }
}

void FaultManager::Release() {
  if (initialized_) {
    RemoveSpecialSignalHandlerFn(SIGSEGV, art_sigsegv_handler);
    if (gUseUserfaultfd) {
      RemoveSpecialSignalHandlerFn(SIGBUS, art_sigbus_handler);
    }
    initialized_ = false;
  }
}

void FaultManager::Shutdown() {
  if (initialized_) {
    Release();

    // Free all handlers.
    STLDeleteElements(&generated_code_handlers_);
    STLDeleteElements(&other_handlers_);

    // Delete remaining code ranges if any (such as nterp code or oat code from
    // oat files that have not been unloaded, including boot image oat files).
    MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
    GeneratedCodeRange* range = generated_code_ranges_.load(std::memory_order_acquire);
    generated_code_ranges_.store(nullptr, std::memory_order_release);
    while (range != nullptr) {
      GeneratedCodeRange* next_range = range->next.load(std::memory_order_relaxed);
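      // `std::less` yields a total order even for pointers into unrelated objects
      // (plain `<` does not guarantee that), so use it to test whether `range` lies
      // inside the static `generated_code_ranges_storage_` array.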
      std::less<GeneratedCodeRange*> less;
      if (!less(range, generated_code_ranges_storage_) &&
          less(range, generated_code_ranges_storage_ + kNumLocalGeneratedCodeRanges)) {
        // Nothing to do - not adding `range` to the `free_generated_code_ranges_` anymore.
      } else {
        // Range is not in the `generated_code_ranges_storage_`.
        delete range;
      }
      range = next_range;
    }
  }
}

bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* context) {
  if (other_handlers_.empty()) {
    return false;
  }

  Thread* self = Thread::Current();

  DCHECK(self != nullptr);
  DCHECK(Runtime::Current() != nullptr);
  DCHECK(Runtime::Current()->IsStarted());
  for (const auto& handler : other_handlers_) {
    if (handler->Action(sig, info, context)) {
      return true;
    }
  }
  return false;
}

bool FaultManager::HandleSigbusFault(int sig, siginfo_t* info, [[maybe_unused]] void* context) {
  DCHECK_EQ(sig, SIGBUS);
  if (VLOG_IS_ON(signals)) {
    PrintSignalInfo(VLOG_STREAM(signals) << "Handling SIGBUS fault:\n", info);
  }

#ifdef TEST_NESTED_SIGNAL
  // Simulate a crash in a handler.
  raise(SIGBUS);
#endif
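  // SIGBUS handling is only wired up when `gUseUserfaultfd` is set (see `Init()`),
  // so give the concurrent mark-compact collector the first chance at the fault.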
  if (Runtime::Current()->GetHeap()->MarkCompactCollector()->SigbusHandler(info)) {
    return true;
  }

  // Set a breakpoint in this function to catch unhandled signals.
  art_sigbus_fault();
  return false;
}

inline void FaultManager::CheckForUnrecognizedImplicitSuspendCheckInBootImage(
    siginfo_t* siginfo, void* context) {
  CHECK_EQ(kRuntimeQuickCodeISA, InstructionSet::kArm64);
  uintptr_t fault_pc = GetFaultPc(siginfo, context);
  if (fault_pc == 0u || !IsUint<32>(fault_pc) || !IsAligned<4u>(fault_pc)) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    return;
  }
  gc::Heap* heap = runtime->GetHeap();
  if (heap == nullptr ||
      fault_pc < heap->GetBootImagesStartAddress() ||
      fault_pc - heap->GetBootImagesStartAddress() >= heap->GetBootImagesSize() ||
      reinterpret_cast<uint32_t*>(fault_pc)[0] != /*LDR x21, [x21]*/ 0xf94002b5u) {
    return;
  }
  std::ostringstream oss;
  oss << "Failed to recognize implicit suspend check at 0x" << std::hex << fault_pc << "; ";
  Thread* thread = Thread::Current();
  if (thread == nullptr) {
    oss << "null thread";
  } else {
    oss << "thread state = " << thread->GetState() << std::boolalpha
        << "; mutator lock shared held = " << Locks::mutator_lock_->IsSharedHeld(thread);
  }
  oss << "; code ranges = {";
  GeneratedCodeRange* range = generated_code_ranges_.load(std::memory_order_acquire);
  const char* s = "";
  while (range != nullptr) {
    oss << s << "{" << range->start << ", " << range->size << "}";
    s = ", ";
    range = range->next.load(std::memory_order_relaxed);
  }
  oss << "}";
  LOG(FATAL) << oss.str();
  UNREACHABLE();
}


bool FaultManager::HandleSigsegvFault(int sig, siginfo_t* info, void* context) {
  if (VLOG_IS_ON(signals)) {
    PrintSignalInfo(VLOG_STREAM(signals) << "Handling SIGSEGV fault:\n", info);
  }

#ifdef TEST_NESTED_SIGNAL
  // Simulate a crash in a handler.
  raise(SIGSEGV);
#endif

  if (IsInGeneratedCode(info, context)) {
    VLOG(signals) << "in generated code, looking for handler";
    for (const auto& handler : generated_code_handlers_) {
      VLOG(signals) << "invoking Action on handler " << handler;
      if (handler->Action(sig, info, context)) {
        // We have handled a signal so it's time to return from the
        // signal handler to the appropriate place.
        return true;
      }
    }
  } else if (kRuntimeQuickCodeISA == InstructionSet::kArm64) {
    CheckForUnrecognizedImplicitSuspendCheckInBootImage(info, context);
  }

  // We hit a signal we didn't handle. This might be something we can give more
  // information about, so call all registered handlers to see if it is.
  if (HandleFaultByOtherHandlers(sig, info, context)) {
    return true;
  }

  // Set a breakpoint in this function to catch unhandled signals.
  art_sigsegv_fault();
  return false;
}

void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
  DCHECK(initialized_);
  if (generated_code) {
    generated_code_handlers_.push_back(handler);
  } else {
    other_handlers_.push_back(handler);
  }
}

void FaultManager::RemoveHandler(FaultHandler* handler) {
  auto it = std::find(generated_code_handlers_.begin(), generated_code_handlers_.end(), handler);
  if (it != generated_code_handlers_.end()) {
    generated_code_handlers_.erase(it);
    return;
  }
  auto it2 = std::find(other_handlers_.begin(), other_handlers_.end(), handler);
  if (it2 != other_handlers_.end()) {
    other_handlers_.erase(it2);
    return;
  }
  LOG(FATAL) << "Attempted to remove non-existent handler " << handler;
}

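// Called with `generated_code_ranges_lock_` held (the only caller in this file is
// `AddGeneratedCodeRange()`), so the free list can be updated without extra synchronization.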
inline FaultManager::GeneratedCodeRange* FaultManager::CreateGeneratedCodeRange(
    const void* start, size_t size) {
  GeneratedCodeRange* range = free_generated_code_ranges_;
  if (range != nullptr) {
    std::less<GeneratedCodeRange*> less;
    DCHECK(!less(range, generated_code_ranges_storage_));
    DCHECK(less(range, generated_code_ranges_storage_ + kNumLocalGeneratedCodeRanges));
    range->start = start;
    range->size = size;
    free_generated_code_ranges_ = range->next.load(std::memory_order_relaxed);
    range->next.store(nullptr, std::memory_order_relaxed);
    return range;
  } else {
    return new GeneratedCodeRange{nullptr, start, size};
  }
}

inline void FaultManager::FreeGeneratedCodeRange(GeneratedCodeRange* range) {
  std::less<GeneratedCodeRange*> less;
  if (!less(range, generated_code_ranges_storage_) &&
      less(range, generated_code_ranges_storage_ + kNumLocalGeneratedCodeRanges)) {
    MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
    range->start = nullptr;
    range->size = 0u;
    range->next.store(free_generated_code_ranges_, std::memory_order_relaxed);
    free_generated_code_ranges_ = range;
  } else {
    // Range is not in the `generated_code_ranges_storage_`.
    delete range;
  }
}

void FaultManager::AddGeneratedCodeRange(const void* start, size_t size) {
  GeneratedCodeRange* new_range = nullptr;
  {
    MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
    new_range = CreateGeneratedCodeRange(start, size);
    GeneratedCodeRange* old_head = generated_code_ranges_.load(std::memory_order_relaxed);
    new_range->next.store(old_head, std::memory_order_relaxed);
    generated_code_ranges_.store(new_range, std::memory_order_release);
  }

  // The above release operation on `generated_code_ranges_`, paired with an acquire
  // operation on the same atomic object in `IsInGeneratedCode()`, ensures the correct
  // memory visibility for the contents of `*new_range` for any thread that loads the
  // value written above (or a value written by a release sequence headed by that write).
  //
  // However, we also need to ensure that any thread that encounters a segmentation
  // fault in the provided range shall actually see the written value. For JIT code
  // cache and nterp, the registration happens while the process is single-threaded
  // but the synchronization is more complicated for code in oat files.
  //
  // Threads that load classes register dex files under the `Locks::dex_lock_` and
  // the first one to register a dex file with a given oat file shall add the oat
  // code range; the memory visibility for these threads is guaranteed by the lock.
  // However a thread that did not try to load a class with oat code can execute the
  // code if a direct or indirect reference to such class escapes from one of the
  // threads that loaded it. Use `membarrier()` for memory visibility in this case.
  art::membarrier(MembarrierCommand::kPrivateExpedited);
}

void FaultManager::RemoveGeneratedCodeRange(const void* start, size_t size) {
  Thread* self = Thread::Current();
  GeneratedCodeRange* range = nullptr;
  {
    MutexLock lock(self, generated_code_ranges_lock_);
    std::atomic<GeneratedCodeRange*>* before = &generated_code_ranges_;
    range = before->load(std::memory_order_relaxed);
    while (range != nullptr && range->start != start) {
      before = &range->next;
      range = before->load(std::memory_order_relaxed);
    }
    if (range != nullptr) {
      GeneratedCodeRange* next = range->next.load(std::memory_order_relaxed);
      if (before == &generated_code_ranges_) {
        // Relaxed store directly to `generated_code_ranges_` would not satisfy
        // conditions for a release sequence, so we need to use store-release.
        before->store(next, std::memory_order_release);
      } else {
        // In the middle of the list, we can use a relaxed store as we're not
        // publishing any newly written memory to potential reader threads.
        // Whether they see the removed node or not is unimportant as we should
        // not execute that code anymore. We're keeping the `next` link of the
        // removed node, so that concurrent walk can use it to reach remaining
        // retained nodes, if any.
        before->store(next, std::memory_order_relaxed);
      }
    }
  }
  CHECK(range != nullptr);
  DCHECK_EQ(range->start, start);
  CHECK_EQ(range->size, size);

  Runtime* runtime = Runtime::Current();
  CHECK(runtime != nullptr);
  if (runtime->IsStarted() && runtime->GetThreadList() != nullptr) {
    // Run a checkpoint before deleting the range to ensure that no thread holds a
    // pointer to the removed range while walking the list in `IsInGeneratedCode()`.
    // That walk is guarded by checking that the thread is `Runnable`, so any walk
    // started before the removal shall be done when running the checkpoint and the
    // checkpoint also ensures the correct memory visibility of `next` links,
    // so the thread shall not see the pointer during future walks.

    // This function is currently called in different mutex and thread states.
    // Semi-space GC performs the cleanup during its `MarkingPhase()` while holding
    // the mutator lock exclusively, so we do not need a checkpoint. All other GCs
    // perform the cleanup in their `ReclaimPhase()` while holding the mutator lock
    // as shared and it's safe to release and re-acquire the mutator lock. Despite
    // holding the mutator lock as shared, the thread is not always marked as `Runnable`.
    // TODO: Clean up state transitions in different GC implementations. b/259440389
    if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
      // We do not need a checkpoint because no other thread is Runnable.
    } else {
      DCHECK(Locks::mutator_lock_->IsSharedHeld(self));
      // Use explicit state transitions or unlock/lock.
      bool runnable = (self->GetState() == ThreadState::kRunnable);
      if (runnable) {
        self->TransitionFromRunnableToSuspended(ThreadState::kNative);
      } else {
        Locks::mutator_lock_->SharedUnlock(self);
      }
      DCHECK(!Locks::mutator_lock_->IsSharedHeld(self));
      runtime->GetThreadList()->RunEmptyCheckpoint();
      if (runnable) {
        self->TransitionFromSuspendedToRunnable();
      } else {
        Locks::mutator_lock_->SharedLock(self);
      }
    }
  }
  FreeGeneratedCodeRange(range);
}

// This function is called within the signal handler. It checks that the thread
// is `Runnable`, the `mutator_lock_` is held (shared) and the fault PC is in one
// of the registered generated code ranges. No annotalysis is done.
bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context) {
  // We can only be running Java code in the current thread if it
  // is in Runnable state.
  VLOG(signals) << "Checking for generated code";
  Thread* thread = Thread::Current();
  if (thread == nullptr) {
    VLOG(signals) << "no current thread";
    return false;
  }

  ThreadState state = thread->GetState();
  if (state != ThreadState::kRunnable) {
    VLOG(signals) << "not runnable";
    return false;
  }

  // Current thread is runnable.
  // Make sure it has the mutator lock.
  if (!Locks::mutator_lock_->IsSharedHeld(thread)) {
    VLOG(signals) << "no lock";
    return false;
  }

  uintptr_t fault_pc = GetFaultPc(siginfo, context);
  if (fault_pc == 0u) {
    VLOG(signals) << "no fault PC";
    return false;
  }

  // Walk over the list of registered code ranges.
  GeneratedCodeRange* range = generated_code_ranges_.load(std::memory_order_acquire);
  while (range != nullptr) {
    if (fault_pc - reinterpret_cast<uintptr_t>(range->start) < range->size) {
      return true;
    }
    // We may or may not see ranges that were concurrently removed, depending
    // on when the relaxed writes of the `next` links become visible. However,
    // even if we're currently at a node that is being removed, we shall visit
    // all remaining ranges that are not being removed as the removed nodes
    // retain the `next` link at the time of removal (which may lead to other
    // removed nodes before reaching remaining retained nodes, if any). Correct
    // memory visibility of `start` and `size` fields of the visited ranges is
    // ensured by the release and acquire operations on `generated_code_ranges_`.
    range = range->next.load(std::memory_order_relaxed);
  }
  return false;
}

FaultHandler::FaultHandler(FaultManager* manager) : manager_(manager) {
}

//
// Null pointer fault handler
//
NullPointerHandler::NullPointerHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, true);
}

bool NullPointerHandler::IsValidMethod(ArtMethod* method) {
  // At this point we know that the thread is `Runnable` and the PC is in one of
  // the registered code ranges. The `method` was read from the top of the stack
  // and should really point to an actual `ArtMethod`, unless we're crashing during
  // prologue or epilogue, or somehow managed to jump to the compiled code by some
  // unexpected path, other than method invoke or exception delivery. We do a few
  // quick checks without guarding from another fault.
  VLOG(signals) << "potential method: " << method;

  static_assert(IsAligned<sizeof(void*)>(ArtMethod::Size(kRuntimePointerSize)));
  if (method == nullptr || !IsAligned<sizeof(void*)>(method)) {
    VLOG(signals) << ((method == nullptr) ? "null method" : "unaligned method");
    return false;
  }

  // Check that the presumed method actually points to a class. Read barriers
  // are not needed (and would be undesirable in a signal handler) when reading
  // a chain of constant references to get to a non-movable `Class.class` object.

  // Note: Allowing nested faults. Checking that the method is in one of the
  // `LinearAlloc` spaces, or that objects we look at are in the `Heap` would be
  // slow and require locking a mutex, which is undesirable in a signal handler.
  // (Though we could register valid ranges similarly to the generated code ranges.)

  mirror::Object* klass =
      method->GetDeclaringClassAddressWithoutBarrier()->AsMirrorPtr();
  if (klass == nullptr || !IsAligned<kObjectAlignment>(klass)) {
    VLOG(signals) << ((klass == nullptr) ? "null class" : "unaligned class");
    return false;
  }

  mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
  if (class_class == nullptr || !IsAligned<kObjectAlignment>(class_class)) {
    VLOG(signals) << ((class_class == nullptr) ? "null class_class" : "unaligned class_class");
    return false;
  }

  if (class_class != class_class->GetClass<kVerifyNone, kWithoutReadBarrier>()) {
    VLOG(signals) << "invalid class_class";
    return false;
  }

  return true;
}

bool NullPointerHandler::IsValidReturnPc(ArtMethod** sp, uintptr_t return_pc) {
  // Check if we can associate a dex PC with the return PC, whether from Nterp,
  // or with an existing stack map entry for a compiled method.
  // Note: Allowing nested faults if `IsValidMethod()` returned a false positive.
  // Note: The `ArtMethod::GetOatQuickMethodHeader()` can acquire locks (at least
  // `Locks::jit_lock_`) and if the thread already held such a lock, the signal
  // handler would deadlock. However, if a thread is holding one of the locks
  // below the mutator lock, the PC should be somewhere in ART code and should
  // not match any registered generated code range, so such a deadlock is
  // unlikely. If it happens anyway, the worst case is that an internal ART crash
  // would be reported as ANR.
  ArtMethod* method = *sp;
  const OatQuickMethodHeader* method_header = method->GetOatQuickMethodHeader(return_pc);
  if (method_header == nullptr) {
    VLOG(signals) << "No method header.";
    return false;
  }
  VLOG(signals) << "looking for dex pc for return pc 0x" << std::hex << return_pc
                << " pc offset: 0x" << std::hex
                << (return_pc - reinterpret_cast<uintptr_t>(method_header->GetEntryPoint()));
  uint32_t dexpc = method_header->ToDexPc(reinterpret_cast<ArtMethod**>(sp), return_pc, false);
  VLOG(signals) << "dexpc: " << dexpc;
  return dexpc != dex::kDexNoIndex;
}

//
// Suspension fault handler
//
SuspensionHandler::SuspensionHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, true);
}

//
// Stack overflow fault handler
//
StackOverflowHandler::StackOverflowHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, true);
}

//
// Stack trace handler, used to help get a stack trace from SIGSEGV inside of compiled code.
//
JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, false);
}

bool JavaStackTraceHandler::Action([[maybe_unused]] int sig, siginfo_t* siginfo, void* context) {
  // Make sure that we are in the generated code, but we may not have a dex pc.
  bool in_generated_code = manager_->IsInGeneratedCode(siginfo, context);
  if (in_generated_code) {
    LOG(ERROR) << "Dumping java stack trace for crash in generated code";
    Thread* self = Thread::Current();

    uintptr_t sp = FaultManager::GetFaultSp(context);
    CHECK_NE(sp, 0u);  // Otherwise we should not have reached this handler.
    // Inside of generated code, sp[0] is the method, so sp is the frame.
    self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp));
    self->DumpJavaStack(LOG_STREAM(ERROR));
  }

  return false;  // Return false since we want to propagate the fault to the main signal handler.
}

}   // namespace art