xref: /aosp_15_r20/art/runtime/thread_list.cc (revision 795d594fd825385562da6b089ea9b2033f3abf5a)
1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "thread_list.h"
18 
19 #include <dirent.h>
20 #include <nativehelper/scoped_local_ref.h>
21 #include <nativehelper/scoped_utf_chars.h>
22 #include <sys/resource.h>  // For getpriority()
23 #include <sys/types.h>
24 #include <unistd.h>
25 
26 #include <map>
27 #include <sstream>
28 #include <tuple>
29 #include <vector>
30 
31 #include "android-base/stringprintf.h"
32 #include "art_field-inl.h"
33 #include "base/aborting.h"
34 #include "base/histogram-inl.h"
35 #include "base/mutex-inl.h"
36 #include "base/systrace.h"
37 #include "base/time_utils.h"
38 #include "base/timing_logger.h"
39 #include "debugger.h"
40 #include "gc/collector/concurrent_copying.h"
41 #include "gc/gc_pause_listener.h"
42 #include "gc/heap.h"
43 #include "gc/reference_processor.h"
44 #include "gc_root.h"
45 #include "jni/jni_internal.h"
46 #include "lock_word.h"
47 #include "mirror/string.h"
48 #include "monitor.h"
49 #include "native_stack_dump.h"
50 #include "obj_ptr-inl.h"
51 #include "scoped_thread_state_change-inl.h"
52 #include "thread.h"
53 #include "trace.h"
54 #include "unwindstack/AndroidUnwinder.h"
55 #include "well_known_classes.h"
56 
57 #if ART_USE_FUTEXES
58 #include <linux/futex.h>
59 #include <sys/syscall.h>
60 #endif  // ART_USE_FUTEXES
61 
62 namespace art HIDDEN {
63 
64 using android::base::StringPrintf;
65 
66 static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
67 
68 // Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
69 // some history.
70 static constexpr bool kDumpUnattachedThreadNativeStackForSigQuit = true;
71 
72 ThreadList::ThreadList(uint64_t thread_suspend_timeout_ns)
73     : suspend_all_count_(0),
74       unregistering_count_(0),
75       suspend_all_histogram_("suspend all histogram", 16, 64),
76       long_suspend_(false),
77       shut_down_(false),
78       thread_suspend_timeout_ns_(thread_suspend_timeout_ns),
79       empty_checkpoint_barrier_(new Barrier(0)) {
80   CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
81 }
82 
83 ThreadList::~ThreadList() {
84   CHECK(shut_down_);
85 }
86 
87 void ThreadList::ShutDown() {
88   ScopedTrace trace(__PRETTY_FUNCTION__);
89   // Detach the current thread if necessary. If we failed to start, there might not be any threads.
90   // We need to detach the current thread here in case there's another thread waiting to join with
91   // us.
92   bool contains = false;
93   Thread* self = Thread::Current();
94   {
95     MutexLock mu(self, *Locks::thread_list_lock_);
96     contains = Contains(self);
97   }
98   if (contains) {
99     Runtime::Current()->DetachCurrentThread();
100   }
101   WaitForOtherNonDaemonThreadsToExit();
102   // The only caller of this function, ~Runtime, has already disabled GC and
103   // ensured that the last GC is finished.
104   gc::Heap* const heap = Runtime::Current()->GetHeap();
105   CHECK(heap->IsGCDisabledForShutdown());
106 
107   // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
108   //       Thread::Init.
109   SuspendAllDaemonThreadsForShutdown();
110 
111   shut_down_ = true;
112 }
113 
114 bool ThreadList::Contains(Thread* thread) {
115   return find(list_.begin(), list_.end(), thread) != list_.end();
116 }
117 
118 pid_t ThreadList::GetLockOwner() {
119   return Locks::thread_list_lock_->GetExclusiveOwnerTid();
120 }
121 
122 void ThreadList::DumpNativeStacks(std::ostream& os) {
123   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
124   unwindstack::AndroidLocalUnwinder unwinder;
125   for (const auto& thread : list_) {
126     os << "DUMPING THREAD " << thread->GetTid() << "\n";
127     DumpNativeStack(os, unwinder, thread->GetTid(), "\t");
128     os << "\n";
129   }
130 }
131 
132 void ThreadList::DumpForSigQuit(std::ostream& os) {
133   {
134     ScopedObjectAccess soa(Thread::Current());
135     // Only print if we have samples.
136     if (suspend_all_histogram_.SampleSize() > 0) {
137       Histogram<uint64_t>::CumulativeData data;
138       suspend_all_histogram_.CreateHistogram(&data);
139       suspend_all_histogram_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
140     }
141   }
142   bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
143   Dump(os, dump_native_stack);
144   DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
145 }
146 
147 static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
148     NO_THREAD_SAFETY_ANALYSIS {
149   // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
150   // refactor DumpState to avoid skipping analysis.
151   Thread::DumpState(os, nullptr, tid);
152   if (dump_native_stack) {
153     DumpNativeStack(os, tid, "  native: ");
154   }
155   os << std::endl;
156 }
157 
158 void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
159   DIR* d = opendir("/proc/self/task");
160   if (!d) {
161     return;
162   }
163 
164   Thread* self = Thread::Current();
165   dirent* e;
166   while ((e = readdir(d)) != nullptr) {
167     char* end;
168     pid_t tid = strtol(e->d_name, &end, 10);
169     if (!*end) {
170       Thread* thread;
171       {
172         MutexLock mu(self, *Locks::thread_list_lock_);
173         thread = FindThreadByTid(tid);
174       }
175       if (thread == nullptr) {
176         DumpUnattachedThread(os, tid, dump_native_stack);
177       }
178     }
179   }
180   closedir(d);
181 }
182 
183 // Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be
184 // overloaded with ANR dumps.
185 static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;
186 
187 // A closure used by Thread::Dump.
188 class DumpCheckpoint final : public Closure {
189  public:
190   DumpCheckpoint(bool dump_native_stack)
191       : lock_("Dump checkpoint lock", kGenericBottomLock),
192         os_(),
193         // Avoid verifying count in case a thread doesn't end up passing through the barrier.
194         // This avoids a SIGABRT that would otherwise happen in the destructor.
195         barrier_(0, /*verify_count_on_shutdown=*/false),
196         unwinder_(std::vector<std::string>{}, std::vector<std::string> {"oat", "odex"}),
197         dump_native_stack_(dump_native_stack) {
198   }
199 
200   void Run(Thread* thread) override {
201     // Note thread and self may not be equal if thread was already suspended at the point of the
202     // request.
203     Thread* self = Thread::Current();
204     CHECK(self != nullptr);
205     std::ostringstream local_os;
206     Locks::mutator_lock_->AssertSharedHeld(self);
207     Thread::DumpOrder dump_order = thread->Dump(local_os, unwinder_, dump_native_stack_);
208     {
209       MutexLock mu(self, lock_);
210       // Sort, so that the most interesting threads for ANR are printed first (ANRs can be trimmed).
211       std::pair<Thread::DumpOrder, uint32_t> sort_key(dump_order, thread->GetThreadId());
212       os_.emplace(sort_key, std::move(local_os));
213     }
214     barrier_.Pass(self);
215   }
216 
217   // Called at the end to print all the dumps in sequential prioritized order.
218   void Dump(Thread* self, std::ostream& os) {
219     MutexLock mu(self, lock_);
220     for (const auto& it : os_) {
221       os << it.second.str() << std::endl;
222     }
223   }
224 
225   void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
226     Thread* self = Thread::Current();
227     ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
228     bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
229     if (timed_out) {
230       // Avoid a recursive abort.
231       LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
232           << "Unexpected time out during dump checkpoint.";
233     }
234   }
235 
236  private:
237   // Storage for the per-thread dumps (guarded by lock since they are generated in parallel).
238   // Map is used to obtain sorted order. The key is unique, but use multimap just in case.
239   Mutex lock_;
240   std::multimap<std::pair<Thread::DumpOrder, uint32_t>, std::ostringstream> os_ GUARDED_BY(lock_);
241   // The barrier to be passed through and for the requestor to wait upon.
242   Barrier barrier_;
243   // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
244   unwindstack::AndroidLocalUnwinder unwinder_;
245   // Whether we should dump the native stack.
246   const bool dump_native_stack_;
247 };
248 
249 void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
250   Thread* self = Thread::Current();
251   {
252     MutexLock mu(self, *Locks::thread_list_lock_);
253     os << "DALVIK THREADS (" << list_.size() << "):\n";
254   }
255   if (self != nullptr) {
256     DumpCheckpoint checkpoint(dump_native_stack);
257     // Acquire mutator lock separately for each thread, to avoid long runnable code sequence
258     // without suspend checks.
259     size_t threads_running_checkpoint = RunCheckpoint(&checkpoint,
260                                                       nullptr,
261                                                       true,
262                                                       /* acquire_mutator_lock= */ true);
263     if (threads_running_checkpoint != 0) {
264       checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
265     }
266     checkpoint.Dump(self, os);
267   } else {
268     DumpUnattachedThreads(os, dump_native_stack);
269   }
270 }
271 
272 void ThreadList::AssertOtherThreadsAreSuspended(Thread* self) {
273   MutexLock mu(self, *Locks::thread_list_lock_);
274   MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
275   for (const auto& thread : list_) {
276     if (thread != self) {
277       CHECK(thread->IsSuspended())
278             << "\nUnsuspended thread: <<" << *thread << "\n"
279             << "self: <<" << *Thread::Current();
280     }
281   }
282 }
283 
284 #if HAVE_TIMED_RWLOCK
285 // Attempt to rectify locks so that we dump thread list with required locks before exiting.
286 NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
287   // Increment gAborting before doing the thread list dump since we don't want any failures from
288   // AssertThreadSuspensionIsAllowable in cases where thread suspension is not allowed.
289   // See b/69044468.
290   ++gAborting;
291   Runtime* runtime = Runtime::Current();
292   std::ostringstream ss;
293   ss << "Thread suspend timeout\n";
294   Locks::mutator_lock_->Dump(ss);
295   ss << "\n";
296   runtime->GetThreadList()->Dump(ss);
297   --gAborting;
298   LOG(FATAL) << ss.str();
299   exit(0);
300 }
301 #endif
302 
303 size_t ThreadList::RunCheckpoint(Closure* checkpoint_function,
304                                  Closure* callback,
305                                  bool allow_lock_checking,
306                                  bool acquire_mutator_lock) {
307   Thread* self = Thread::Current();
308   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
309   Locks::thread_list_lock_->AssertNotHeld(self);
310   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
311   if (kIsDebugBuild && allow_lock_checking && !acquire_mutator_lock) {
312     // TODO: Consider better checking with acquire_mutator_lock.
313     self->DisallowPreMonitorMutexes();
314   }
315 
316   std::vector<Thread*> remaining_threads;
317   size_t count = 0;
318   bool mutator_lock_held = Locks::mutator_lock_->IsSharedHeld(self);
319   ThreadState old_thread_state = self->GetState();
320   DCHECK(!(mutator_lock_held && acquire_mutator_lock));
321 
322   // Thread-safety analysis wants the lock state to always be the same at every program point.
323   // Allow us to pretend it is.
324   auto fake_mutator_lock = []() SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
325                                NO_THREAD_SAFETY_ANALYSIS {};
326   auto fake_mutator_unlock = []() UNLOCK_FUNCTION(Locks::mutator_lock_)
327                                  NO_THREAD_SAFETY_ANALYSIS {};
328 
329   if (acquire_mutator_lock) {
330     self->TransitionFromSuspendedToRunnable();
331   } else {
332     fake_mutator_lock();
333   }
334   Locks::thread_list_lock_->Lock(self);
335   Locks::thread_suspend_count_lock_->Lock(self);
336 
337   // First try to install checkpoint function in each thread. This will succeed only for
338   // runnable threads. Track others in remaining_threads.
339   count = list_.size();
340   for (const auto& thread : list_) {
341     if (thread != self) {
342       if (thread->RequestCheckpoint(checkpoint_function)) {
343         // This thread will run its checkpoint some time in the near future.
344       } else {
345         remaining_threads.push_back(thread);
346       }
347     }
348     // Thread either has honored or will honor the checkpoint, or it has been added to
349     // remaining_threads.
350   }
351 
352   // ith entry corresponds to remaining_threads[i]:
353   std::unique_ptr<ThreadExitFlag[]> tefs(new ThreadExitFlag[remaining_threads.size()]);
354 
355   // Register a ThreadExitFlag for each remaining thread.
356   for (size_t i = 0; i < remaining_threads.size(); ++i) {
357     remaining_threads[i]->NotifyOnThreadExit(&tefs[i]);
358   }
359 
360   // Run the callback to be called inside this critical section.
361   if (callback != nullptr) {
362     callback->Run(self);
363   }
364 
365   size_t nthreads = remaining_threads.size();
366   size_t starting_thread = 0;
367   size_t next_starting_thread;  // First possible remaining non-null entry in remaining_threads.
368   // Run the checkpoint for the suspended threads.
369   do {
370     // We hold mutator_lock_ (if desired), thread_list_lock_, and suspend_count_lock_
371     next_starting_thread = nthreads;
372     for (size_t i = 0; i < nthreads; ++i) {
373       Thread* thread = remaining_threads[i];
374       if (thread == nullptr) {
375         continue;
376       }
377       if (tefs[i].HasExited()) {
378         remaining_threads[i] = nullptr;
379         --count;
380         continue;
381       }
382       bool was_runnable = thread->RequestCheckpoint(checkpoint_function);
383       if (was_runnable) {
384         // Thread became runnable, and will run the checkpoint; we're done.
385         thread->UnregisterThreadExitFlag(&tefs[i]);
386         remaining_threads[i] = nullptr;
387         continue;
388       }
389       // Thread was still suspended, as expected.
390       // We need to run the checkpoint ourselves. Suspend thread so it stays suspended.
391       thread->IncrementSuspendCount(self);
392       if (LIKELY(thread->IsSuspended())) {
393         // Run the checkpoint function ourselves.
394         // We need to run the checkpoint function without the thread_list and suspend_count locks.
395         Locks::thread_suspend_count_lock_->Unlock(self);
396         Locks::thread_list_lock_->Unlock(self);
397         if (mutator_lock_held || acquire_mutator_lock) {
398           // Make sure there is no pending flip function before running Java-heap-accessing
399           // checkpoint on behalf of thread.
400           Thread::EnsureFlipFunctionStarted(self, thread);
401           if (thread->GetStateAndFlags(std::memory_order_acquire)
402                   .IsAnyOfFlagsSet(Thread::FlipFunctionFlags())) {
403             // There is another thread running the flip function for 'thread'.
404             // Instead of waiting for it to complete, move to the next thread.
405             // Retry this one later from scratch.
406             next_starting_thread = std::min(next_starting_thread, i);
407             Locks::thread_list_lock_->Lock(self);
408             Locks::thread_suspend_count_lock_->Lock(self);
409             thread->DecrementSuspendCount(self);
410             Thread::resume_cond_->Broadcast(self);
411             continue;
412           }
413         }  // O.w. the checkpoint will not access Java data structures, and doesn't care whether
414            // the flip function has been called.
415         checkpoint_function->Run(thread);
416         if (acquire_mutator_lock) {
417           {
418             MutexLock mu3(self, *Locks::thread_suspend_count_lock_);
419             thread->DecrementSuspendCount(self);
420             // In the case of a thread waiting for IO or the like, there will be no waiters
421             // on resume_cond_, so Broadcast() will not enter the kernel, and thus be cheap.
422             Thread::resume_cond_->Broadcast(self);
423           }
424           {
425             // Allow us to run checkpoints, or be suspended between checkpoint invocations.
426             ScopedThreadSuspension sts(self, old_thread_state);
427           }
428           Locks::thread_list_lock_->Lock(self);
429           Locks::thread_suspend_count_lock_->Lock(self);
430         } else {
431           Locks::thread_list_lock_->Lock(self);
432           Locks::thread_suspend_count_lock_->Lock(self);
433           thread->DecrementSuspendCount(self);
434           Thread::resume_cond_->Broadcast(self);
435         }
436         thread->UnregisterThreadExitFlag(&tefs[i]);
437         remaining_threads[i] = nullptr;
438       } else {
439         // Thread may have become runnable between the time we last checked and
440         // the time we incremented the suspend count. We defer to the next attempt, rather than
441         // waiting for it to suspend. Note that this may still unnecessarily trigger a signal
442         // handler, but it should be exceedingly rare.
443         thread->DecrementSuspendCount(self);
444         Thread::resume_cond_->Broadcast(self);
445         next_starting_thread = std::min(next_starting_thread, i);
446       }
447     }
448     starting_thread = next_starting_thread;
449   } while (starting_thread != nthreads);
450 
451   // Finally run the checkpoint on ourself. We will already have run the flip function, if we're
452   // runnable.
453   Locks::thread_list_lock_->Unlock(self);
454   Locks::thread_suspend_count_lock_->Unlock(self);
455   checkpoint_function->Run(self);
456 
457   if (acquire_mutator_lock) {
458     self->TransitionFromRunnableToSuspended(old_thread_state);
459   } else {
460     fake_mutator_unlock();
461   }
462 
463   DCHECK(std::all_of(remaining_threads.cbegin(), remaining_threads.cend(), [](Thread* thread) {
464     return thread == nullptr;
465   }));
466   Thread::DCheckUnregisteredEverywhere(&tefs[0], &tefs[nthreads - 1]);
467 
468   if (kIsDebugBuild && allow_lock_checking && !acquire_mutator_lock) {
469     self->AllowPreMonitorMutexes();
470   }
471   return count;
472 }
473 
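// Illustrative usage sketch (not part of the original file): a minimal Closure run on every
// thread via RunCheckpoint() above. The closure class name and the atomic counter are
// hypothetical; only Closure, Thread, Runtime and RunCheckpoint come from the surrounding code,
// and <atomic> is assumed to be available.
//
//   class CountThreadsClosure : public Closure {
//    public:
//     void Run(Thread* thread) override {
//       // Runs either on `thread` itself (if it was runnable) or on the requesting thread on
//       // its behalf (if the target stayed suspended), as implemented in RunCheckpoint() above.
//       if (thread != nullptr) {
//         count_.fetch_add(1, std::memory_order_relaxed);
//       }
//     }
//     std::atomic<size_t> count_{0};
//   };
//
//   CountThreadsClosure closure;
//   size_t n = Runtime::Current()->GetThreadList()->RunCheckpoint(
//       &closure, /*callback=*/nullptr, /*allow_lock_checking=*/false,
//       /*acquire_mutator_lock=*/false);
//   // `n` is the number of threads that ran, or will run, the checkpoint (including the caller).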
474 void ThreadList::RunEmptyCheckpoint() {
475   Thread* self = Thread::Current();
476   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
477   Locks::thread_list_lock_->AssertNotHeld(self);
478   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
479   std::vector<uint32_t> runnable_thread_ids;
480   size_t count = 0;
481   Barrier* barrier = empty_checkpoint_barrier_.get();
482   barrier->Init(self, 0);
483   {
484     MutexLock mu(self, *Locks::thread_list_lock_);
485     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
486     for (Thread* thread : list_) {
487       if (thread != self) {
488         while (true) {
489           if (thread->RequestEmptyCheckpoint()) {
490             // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
491             // some time in the near future.
492             ++count;
493             if (kIsDebugBuild) {
494               runnable_thread_ids.push_back(thread->GetThreadId());
495             }
496             break;
497           }
498           if (thread->GetState() != ThreadState::kRunnable) {
499             // It's seen suspended, we are done because it must not be in the middle of a mutator
500             // heap access.
501             break;
502           }
503         }
504       }
505     }
506   }
507 
508   // Wake up the threads blocking for weak ref access so that they will respond to the empty
509   // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
510   Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
511   Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint=*/true);
512   {
513     ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
514     uint64_t total_wait_time = 0;
515     bool first_iter = true;
516     while (true) {
517       // Wake up the runnable threads blocked on the mutexes that another thread, which is blocked
518       // on a weak ref access, holds (indirectly blocking for weak ref access through another thread
519       // and a mutex.) This needs to be done periodically because the thread may be preempted
520       // between the CheckEmptyCheckpointFromMutex call and the subsequent futex wait in
521       // Mutex::ExclusiveLock, etc. when the wakeup via WakeupToRespondToEmptyCheckpoint
522       // arrives. This could cause a *very rare* deadlock, if not repeated. Most of the cases are
523       // handled in the first iteration.
524       for (BaseMutex* mutex : Locks::expected_mutexes_on_weak_ref_access_) {
525         mutex->WakeupToRespondToEmptyCheckpoint();
526       }
527       static constexpr uint64_t kEmptyCheckpointPeriodicTimeoutMs = 100;  // 100ms
528       static constexpr uint64_t kEmptyCheckpointTotalTimeoutMs = 600 * 1000;  // 10 minutes.
529       size_t barrier_count = first_iter ? count : 0;
530       first_iter = false;  // Don't add to the barrier count from the second iteration on.
531       bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointPeriodicTimeoutMs);
532       if (!timed_out) {
533         break;  // Success
534       }
535       // This is a very rare case.
536       total_wait_time += kEmptyCheckpointPeriodicTimeoutMs;
537       if (kIsDebugBuild && total_wait_time > kEmptyCheckpointTotalTimeoutMs) {
538         std::ostringstream ss;
539         ss << "Empty checkpoint timeout\n";
540         ss << "Barrier count " << barrier->GetCount(self) << "\n";
541         ss << "Runnable thread IDs";
542         for (uint32_t tid : runnable_thread_ids) {
543           ss << " " << tid;
544         }
545         ss << "\n";
546         Locks::mutator_lock_->Dump(ss);
547         ss << "\n";
548         LOG(FATAL_WITHOUT_ABORT) << ss.str();
549         // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
550         // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
551         {
552           ScopedObjectAccess soa(self);
553           MutexLock mu1(self, *Locks::thread_list_lock_);
554           for (Thread* thread : GetList()) {
555             uint32_t tid = thread->GetThreadId();
556             bool is_in_runnable_thread_ids =
557                 std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
558                 runnable_thread_ids.end();
559             if (is_in_runnable_thread_ids &&
560                 thread->ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
561               // Found a runnable thread that hasn't responded to the empty checkpoint request.
562               // Assume it's stuck and safe to dump its stack.
563               thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
564                            /*dump_native_stack=*/ true,
565                            /*force_dump_stack=*/ true);
566             }
567           }
568         }
569         LOG(FATAL_WITHOUT_ABORT)
570             << "Dumped runnable threads that haven't responded to empty checkpoint.";
571         // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
572         Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
573         LOG(FATAL) << "Dumped all threads.";
574       }
575     }
576   }
577 }
578 
579 // Separate function to disable just the right amount of thread-safety analysis.
580 ALWAYS_INLINE void AcquireMutatorLockSharedUncontended(Thread* self)
581     ACQUIRE_SHARED(*Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
582   bool success = Locks::mutator_lock_->SharedTryLock(self, /*check=*/false);
583   CHECK(success);
584 }
585 
586 // A checkpoint/suspend-all hybrid to switch thread roots from
587 // from-space to to-space refs. Used to synchronize threads at a point
588 // to mark the initiation of marking while maintaining the to-space
589 // invariant.
590 void ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
591                                  Closure* flip_callback,
592                                  gc::collector::GarbageCollector* collector,
593                                  gc::GcPauseListener* pause_listener) {
594   TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
595   Thread* self = Thread::Current();
596   Locks::mutator_lock_->AssertNotHeld(self);
597   Locks::thread_list_lock_->AssertNotHeld(self);
598   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
599   CHECK_NE(self->GetState(), ThreadState::kRunnable);
600 
601   collector->GetHeap()->ThreadFlipBegin(self);  // Sync with JNI critical calls.
602 
603   // ThreadFlipBegin happens before we suspend all the threads, so it does not
604   // count towards the pause.
605   const uint64_t suspend_start_time = NanoTime();
606   VLOG(threads) << "Suspending all for thread flip";
607   {
608     ScopedTrace trace("ThreadFlipSuspendAll");
609     SuspendAllInternal(self);
610   }
611 
612   std::vector<Thread*> flipping_threads;  // All suspended threads. Includes us.
613   int thread_count;
614   // Flipping threads might exit between the time we resume them and try to run the flip function.
615   // Track that in a parallel vector.
616   std::unique_ptr<ThreadExitFlag[]> exit_flags;
617 
618   {
619     TimingLogger::ScopedTiming t("FlipThreadSuspension", collector->GetTimings());
620     if (pause_listener != nullptr) {
621       pause_listener->StartPause();
622     }
623 
624     // Run the flip callback for the collector.
625     Locks::mutator_lock_->ExclusiveLock(self);
626     suspend_all_histogram_.AdjustAndAddValue(NanoTime() - suspend_start_time);
627     flip_callback->Run(self);
628 
629     {
630       MutexLock mu(self, *Locks::thread_list_lock_);
631       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
632       thread_count = list_.size();
633       exit_flags.reset(new ThreadExitFlag[thread_count]);
634       flipping_threads.resize(thread_count, nullptr);
635       int i = 1;
636       for (Thread* thread : list_) {
637         // Set the flip function for all threads because once we start resuming any threads,
638         // they may need to run the flip function on behalf of other threads, even this one.
639         DCHECK(thread == self || thread->IsSuspended());
640         thread->SetFlipFunction(thread_flip_visitor);
641         // Put ourselves first, so other threads are more likely to have finished before we get
642         // there.
643         int thread_index = thread == self ? 0 : i++;
644         flipping_threads[thread_index] = thread;
645         thread->NotifyOnThreadExit(&exit_flags[thread_index]);
646       }
647       DCHECK(i == thread_count);
648     }
649 
650     if (pause_listener != nullptr) {
651       pause_listener->EndPause();
652     }
653   }
654   // Any new threads created after this will be created by threads that already ran their flip
655   // functions. In the normal GC use case in which the flip function converts all local references
656   // to to-space references, these newly created threads will also see only to-space references.
657 
658   // Resume threads, making sure that we do not release suspend_count_lock_ until we've reacquired
659   // the mutator_lock_ in shared mode, and decremented suspend_all_count_.  This avoids a
660   // concurrent SuspendAll, and ensures that newly started threads see a correct value of
661   // suspend_all_count.
662   {
663     MutexLock mu(self, *Locks::thread_list_lock_);
664     Locks::thread_suspend_count_lock_->Lock(self);
665     ResumeAllInternal(self);
666   }
667   collector->RegisterPause(NanoTime() - suspend_start_time);
668 
669   // Since all threads were suspended, they will attempt to run the flip function before
670   // reentering a runnable state. We will also attempt to run the flip functions ourselves.  Any
671   // intervening checkpoint request will do the same.  Exactly one of those flip function attempts
672   // will succeed, and the target thread will not be able to reenter a runnable state until one of
673   // them does.
674 
675   // Try to run the closure on the other threads.
676   TimingLogger::ScopedTiming split3("RunningThreadFlips", collector->GetTimings());
677   // Reacquire the mutator lock while holding suspend_count_lock. This cannot fail, since we
678   // do not acquire the mutator lock unless suspend_all_count was read as 0 while holding
679   // suspend_count_lock. We did not release suspend_count_lock since releasing the mutator
680   // lock.
681   AcquireMutatorLockSharedUncontended(self);
682 
683   Locks::thread_suspend_count_lock_->Unlock(self);
684   // Concurrent SuspendAll may now see zero suspend_all_count_, but block on mutator_lock_.
685 
686   collector->GetHeap()->ThreadFlipEnd(self);
687 
688   for (int i = 0; i < thread_count; ++i) {
689     bool finished;
690     Thread::EnsureFlipFunctionStarted(
691         self, flipping_threads[i], Thread::StateAndFlags(0), &exit_flags[i], &finished);
692     if (finished) {
693       MutexLock mu2(self, *Locks::thread_list_lock_);
694       flipping_threads[i]->UnregisterThreadExitFlag(&exit_flags[i]);
695       flipping_threads[i] = nullptr;
696     }
697   }
698   // Make sure all flips complete before we return.
699   for (int i = 0; i < thread_count; ++i) {
700     if (UNLIKELY(flipping_threads[i] != nullptr)) {
701       flipping_threads[i]->WaitForFlipFunctionTestingExited(self, &exit_flags[i]);
702       MutexLock mu2(self, *Locks::thread_list_lock_);
703       flipping_threads[i]->UnregisterThreadExitFlag(&exit_flags[i]);
704     }
705   }
706 
707   Thread::DCheckUnregisteredEverywhere(&exit_flags[0], &exit_flags[thread_count - 1]);
708 
709   Locks::mutator_lock_->SharedUnlock(self);
710 }
711 
712 // True only for debugging suspend timeout code. The resulting timeouts are short enough that
713 // failures are expected.
714 static constexpr bool kShortSuspendTimeouts = false;
715 
716 static constexpr unsigned kSuspendBarrierIters = kShortSuspendTimeouts ? 5 : 20;
717 
718 #if ART_USE_FUTEXES
719 
720 // Returns true if it timed out.
721 static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
722                                       int32_t cur_val,
723                                       uint64_t timeout_ns) {
724   timespec wait_timeout;
725   if (kShortSuspendTimeouts) {
726     timeout_ns = MsToNs(kSuspendBarrierIters);
727     CHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 1ul);
728   } else {
729     DCHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 10ul);
730   }
731   InitTimeSpec(false, CLOCK_MONOTONIC, NsToMs(timeout_ns / kSuspendBarrierIters), 0, &wait_timeout);
732   if (futex(barrier->Address(), FUTEX_WAIT_PRIVATE, cur_val, &wait_timeout, nullptr, 0) != 0) {
733     if (errno == ETIMEDOUT) {
734       return true;
735     } else if (errno != EAGAIN && errno != EINTR) {
736       PLOG(FATAL) << "futex wait for suspend barrier failed";
737     }
738   }
739   return false;
740 }
741 
742 #else
743 
744 static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
745                                       int32_t cur_val,
746                                       uint64_t timeout_ns) {
747   // In the normal case, aim for a couple of hundred milliseconds.
748   const unsigned kInnerIters =
749       kShortSuspendTimeouts ? 1'000 : (timeout_ns / 1000) / kSuspendBarrierIters;
750   DCHECK_GE(kInnerIters, 1'000u);
751   for (int i = 0; i < kInnerIters; ++i) {
752     sched_yield();
753     if (barrier->load(std::memory_order_acquire) == 0) {
754       return false;
755     }
756   }
757   return true;
758 }
759 
760 #endif  // ART_USE_FUTEXES
761 
762 std::optional<std::string> ThreadList::WaitForSuspendBarrier(AtomicInteger* barrier,
763                                                              pid_t t,
764                                                              int attempt_of_4) {
765   // Only fail after kIter timeouts, to make us robust against app freezing.
766 #if ART_USE_FUTEXES
767   const uint64_t start_time = NanoTime();
768 #endif
769   uint64_t timeout_ns =
770       attempt_of_4 == 0 ? thread_suspend_timeout_ns_ : thread_suspend_timeout_ns_ / 4;
771 
772   uint64_t avg_wait_multiplier = 1;
773   uint64_t wait_multiplier = 1;
774   if (attempt_of_4 != 1) {
775     // TODO: RequestSynchronousCheckpoint routinely passes attempt_of_4 = 0. Can
776     // we avoid the getpriority() call?
777     if (getpriority(PRIO_PROCESS, 0 /* this thread */) > 0) {
778       // We're a low priority thread, and thus have a longer ANR timeout. Increase the suspend
779       // timeout.
780       avg_wait_multiplier = 3;
781     }
782     // To avoid the system calls in the common case, we fail to increase the first of 4 waits, but
783     // then compensate during the last one. This also allows somewhat longer thread monitoring
784     // before we time out.
785     wait_multiplier = attempt_of_4 == 4 ? 2 * avg_wait_multiplier - 1 : avg_wait_multiplier;
786     timeout_ns *= wait_multiplier;
787   }
788   bool collect_state = (t != 0 && (attempt_of_4 == 0 || attempt_of_4 == 4));
789   int32_t cur_val = barrier->load(std::memory_order_acquire);
790   if (cur_val <= 0) {
791     DCHECK_EQ(cur_val, 0);
792     return std::nullopt;
793   }
794   unsigned i = 0;
795   if (WaitOnceForSuspendBarrier(barrier, cur_val, timeout_ns)) {
796     i = 1;
797   }
798   cur_val = barrier->load(std::memory_order_acquire);
799   if (cur_val <= 0) {
800     DCHECK_EQ(cur_val, 0);
801     return std::nullopt;
802   }
803 
804   // Long wait; gather information in case of timeout.
805   std::string sampled_state = collect_state ? GetOsThreadStatQuick(t) : "";
806   while (i < kSuspendBarrierIters) {
807     if (WaitOnceForSuspendBarrier(barrier, cur_val, timeout_ns)) {
808       ++i;
809 #if ART_USE_FUTEXES
810       if (!kShortSuspendTimeouts) {
811         CHECK_GE(NanoTime() - start_time, i * timeout_ns / kSuspendBarrierIters - 1'000'000);
812       }
813 #endif
814     }
815     cur_val = barrier->load(std::memory_order_acquire);
816     if (cur_val <= 0) {
817       DCHECK_EQ(cur_val, 0);
818       return std::nullopt;
819     }
820   }
821   uint64_t final_wait_time = NanoTime() - start_time;
822   uint64_t total_wait_time = attempt_of_4 == 0 ?
823                                  final_wait_time :
824                                  4 * final_wait_time * avg_wait_multiplier / wait_multiplier;
825   return collect_state ? "Target states: [" + sampled_state + ", " + GetOsThreadStatQuick(t) + "]" +
826                              (cur_val == 0 ? "(barrier now passed)" : "") +
827                              " Final wait time: " + PrettyDuration(final_wait_time) +
828                              "; appr. total wait time: " + PrettyDuration(total_wait_time) :
829                          "";
830 }
831 
832 void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
833   Thread* self = Thread::Current();
834 
835   if (self != nullptr) {
836     VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
837   } else {
838     VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
839   }
840   {
841     ScopedTrace trace("Suspending mutator threads");
842     const uint64_t start_time = NanoTime();
843 
844     SuspendAllInternal(self);
845     // All threads are known to have suspended (but a thread may still own the mutator lock)
846     // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
847 #if HAVE_TIMED_RWLOCK
848     while (true) {
849       if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self,
850                                                          NsToMs(thread_suspend_timeout_ns_),
851                                                          0)) {
852         break;
853       } else if (!long_suspend_) {
854         // Reading long_suspend without the mutator lock is slightly racy, in some rare cases, this
855         // could result in a thread suspend timeout.
856         // Timeout if we wait more than thread_suspend_timeout_ns_ nanoseconds.
857         UnsafeLogFatalForThreadSuspendAllTimeout();
858       }
859     }
860 #else
861     Locks::mutator_lock_->ExclusiveLock(self);
862 #endif
863 
864     long_suspend_ = long_suspend;
865 
866     const uint64_t end_time = NanoTime();
867     const uint64_t suspend_time = end_time - start_time;
868     suspend_all_histogram_.AdjustAndAddValue(suspend_time);
869     if (suspend_time > kLongThreadSuspendThreshold) {
870       LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
871     }
872 
873     if (kDebugLocking) {
874       // Debug check that all threads are suspended.
875       AssertOtherThreadsAreSuspended(self);
876     }
877   }
878 
879   // SuspendAllInternal blocks if we are in the middle of a flip.
880   DCHECK(!self->ReadFlag(ThreadFlag::kPendingFlipFunction));
881   DCHECK(!self->ReadFlag(ThreadFlag::kRunningFlipFunction));
882 
883   ATraceBegin((std::string("Mutator threads suspended for ") + cause).c_str());
884 
885   if (self != nullptr) {
886     VLOG(threads) << *self << " SuspendAll complete";
887   } else {
888     VLOG(threads) << "Thread[null] SuspendAll complete";
889   }
890 }
891 
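// Usage sketch (illustrative, not part of the original file): SuspendAll() must be matched by a
// ResumeAll() on the same thread. Callers normally go through the RAII helper ScopedSuspendAll
// declared alongside ThreadList (assumed available in this tree) rather than calling the pair
// directly:
//
//   {
//     ScopedSuspendAll ssa(__FUNCTION__);
//     // All other mutator threads are suspended; this thread holds mutator_lock_ exclusively.
//   }  // ~ScopedSuspendAll resumes all threads.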
892 // Ensures all threads running Java suspend and that those not running Java don't start.
893 void ThreadList::SuspendAllInternal(Thread* self, SuspendReason reason) {
894   // self can be nullptr if this is an unregistered thread.
895   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
896   Locks::thread_list_lock_->AssertNotHeld(self);
897   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
898   if (kDebugLocking && self != nullptr) {
899     CHECK_NE(self->GetState(), ThreadState::kRunnable);
900   }
901 
902   // First request that all threads suspend, then wait for them to suspend before
903   // returning. This suspension scheme also relies on other behaviour:
904   // 1. Threads cannot be deleted while they are suspended or have a suspend-
905   //    request flag set - (see Unregister() below).
906   // 2. When threads are created, they are created in a suspended state (actually
907   //    kNative) and will never begin executing Java code without first checking
908   //    the suspend-request flag.
909 
910   // The atomic counter for number of threads that need to pass the barrier.
911   AtomicInteger pending_threads;
912 
913   for (int iter_count = 1;; ++iter_count) {
914     {
915       MutexLock mu(self, *Locks::thread_list_lock_);
916       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
917       if (suspend_all_count_ == 0) {
918         // Never run multiple SuspendAlls concurrently.
919         // If we are asked to suspend ourselves, we proceed anyway, but must ignore suspend
920         // request from other threads until we resume them.
921         bool found_myself = false;
922         // Update global suspend all state for attaching threads.
923         ++suspend_all_count_;
924         pending_threads.store(list_.size() - (self == nullptr ? 0 : 1), std::memory_order_relaxed);
925         // Increment everybody else's suspend count.
926         for (const auto& thread : list_) {
927           if (thread == self) {
928             found_myself = true;
929           } else {
930             VLOG(threads) << "requesting thread suspend: " << *thread;
931             DCHECK_EQ(suspend_all_count_, 1);
932             thread->IncrementSuspendCount(self, &pending_threads, nullptr, reason);
933             if (thread->IsSuspended()) {
934               // Effectively pass the barrier on behalf of the already suspended thread.
935               // The thread itself cannot yet have acted on our request since we still hold the
936               // suspend_count_lock_, and it will notice that kActiveSuspendBarrier has already
937               // been cleared if and when it acquires the lock in PassActiveSuspendBarriers().
938               DCHECK_EQ(thread->tlsPtr_.active_suspendall_barrier, &pending_threads);
939               pending_threads.fetch_sub(1, std::memory_order_seq_cst);
940               thread->tlsPtr_.active_suspendall_barrier = nullptr;
941               if (!thread->HasActiveSuspendBarrier()) {
942                 thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
943               }
944             }
945             // else:
946             // The target thread was not yet suspended, and hence will be forced to execute
947             // TransitionFromRunnableToSuspended shortly. Since we set the kSuspendRequest flag
948             // before checking, and it checks kActiveSuspendBarrier after noticing kSuspendRequest,
949             // it must notice kActiveSuspendBarrier when it does. Thus it is guaranteed to
950             // decrement the suspend barrier. We're relying on store; load ordering here, but
951             // that's not a problem, since state and flags all reside in the same atomic, and
952             // are thus properly ordered, even for relaxed accesses.
953           }
954         }
955         self->AtomicSetFlag(ThreadFlag::kSuspensionImmune, std::memory_order_relaxed);
956         DCHECK(self == nullptr || found_myself);
957         break;
958       }
959     }
960     if (iter_count >= kMaxSuspendRetries) {
961       LOG(FATAL) << "Too many SuspendAll retries: " << iter_count;
962     } else {
963       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
964       DCHECK_LE(suspend_all_count_, 1);
965       if (suspend_all_count_ != 0) {
966         // This may take a while, and we're not runnable, and thus would otherwise not block.
967         Thread::resume_cond_->WaitHoldingLocks(self);
968         continue;
969       }
970     }
971     // We're already not runnable, so an attempt to suspend us should succeed.
972   }
973 
974   Thread* culprit = nullptr;
975   pid_t tid = 0;
976   std::ostringstream oss;
977   for (int attempt_of_4 = 1; attempt_of_4 <= 4; ++attempt_of_4) {
978     auto result = WaitForSuspendBarrier(&pending_threads, tid, attempt_of_4);
979     if (!result.has_value()) {
980       // Wait succeeded.
981       break;
982     }
983     if (attempt_of_4 == 3) {
984       // Second to the last attempt; Try to gather more information in case we time out.
985       MutexLock mu(self, *Locks::thread_list_lock_);
986       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
987       oss << "remaining threads: ";
988       for (const auto& thread : list_) {
989         if (thread != self && !thread->IsSuspended()) {
990           culprit = thread;
991           oss << *thread << ", ";
992         }
993       }
994       if (culprit != nullptr) {
995         tid = culprit->GetTid();
996       }
997     } else if (attempt_of_4 == 4) {
998       // Final attempt still timed out.
999       if (culprit == nullptr) {
1000         LOG(FATAL) << "SuspendAll timeout. Couldn't find holdouts.";
1001       } else {
1002         std::string name;
1003         culprit->GetThreadName(name);
1004         oss << "Info for " << name << ": ";
1005         std::string thr_descr =
1006             StringPrintf("state&flags: 0x%x, Java/native priority: %d/%d, barrier value: %d, ",
1007                          culprit->GetStateAndFlags(std::memory_order_relaxed).GetValue(),
1008                          culprit->GetNativePriority(),
1009                          getpriority(PRIO_PROCESS /* really thread */, culprit->GetTid()),
1010                          pending_threads.load());
1011         oss << thr_descr << result.value();
1012         culprit->AbortInThis("SuspendAll timeout; " + oss.str());
1013       }
1014     }
1015   }
1016 }
1017 
1018 void ThreadList::ResumeAll() {
1019   Thread* self = Thread::Current();
1020   if (kDebugLocking) {
1021     // Debug check that all threads are suspended.
1022     AssertOtherThreadsAreSuspended(self);
1023   }
1024   MutexLock mu(self, *Locks::thread_list_lock_);
1025   MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1026   ATraceEnd();  // Matching "Mutator threads suspended ..." in SuspendAll.
1027   ResumeAllInternal(self);
1028 }
1029 
1030 // Holds thread_list_lock_ and suspend_count_lock_
1031 void ThreadList::ResumeAllInternal(Thread* self) {
1032   DCHECK_NE(self->GetState(), ThreadState::kRunnable);
1033   if (self != nullptr) {
1034     VLOG(threads) << *self << " ResumeAll starting";
1035   } else {
1036     VLOG(threads) << "Thread[null] ResumeAll starting";
1037   }
1038 
1039   ScopedTrace trace("Resuming mutator threads");
1040 
1041   long_suspend_ = false;
1042 
1043   Locks::mutator_lock_->ExclusiveUnlock(self);
1044 
1045   // Decrement the suspend counts for all threads.
1046   for (const auto& thread : list_) {
1047     if (thread != self) {
1048       thread->DecrementSuspendCount(self);
1049     }
1050   }
1051 
1052   // Update global suspend all state for attaching threads. Unblocks other SuspendAlls once
1053   // suspend_count_lock_ is released.
1054   --suspend_all_count_;
1055   self->AtomicClearFlag(ThreadFlag::kSuspensionImmune, std::memory_order_relaxed);
1056   // Pending suspend requests for us will be handled when we become Runnable again.
1057 
1058   // Broadcast a notification to all suspended threads, some or all of
1059   // which may choose to wake up.  No need to wait for them.
1060   if (self != nullptr) {
1061     VLOG(threads) << *self << " ResumeAll waking others";
1062   } else {
1063     VLOG(threads) << "Thread[null] ResumeAll waking others";
1064   }
1065   Thread::resume_cond_->Broadcast(self);
1066 
1067   if (self != nullptr) {
1068     VLOG(threads) << *self << " ResumeAll complete";
1069   } else {
1070     VLOG(threads) << "Thread[null] ResumeAll complete";
1071   }
1072 }
1073 
1074 bool ThreadList::Resume(Thread* thread, SuspendReason reason) {
1075   // This assumes there was an ATraceBegin when we suspended the thread.
1076   ATraceEnd();
1077 
1078   Thread* self = Thread::Current();
1079   DCHECK_NE(thread, self);
1080   VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..." << reason;
1081 
1082   {
1083     // To check Contains.
1084     MutexLock mu(self, *Locks::thread_list_lock_);
1085     // To check IsSuspended.
1086     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1087     if (UNLIKELY(!thread->IsSuspended())) {
1088       LOG(reason == SuspendReason::kForUserCode ? ERROR : FATAL)
1089           << "Resume(" << reinterpret_cast<void*>(thread) << ") thread not suspended";
1090       return false;
1091     }
1092     if (!Contains(thread)) {
1093       // We only expect threads within the thread-list to have been suspended otherwise we can't
1094       // stop such threads from delete-ing themselves.
1095       LOG(reason == SuspendReason::kForUserCode ? ERROR : FATAL)
1096           << "Resume(" << reinterpret_cast<void*>(thread) << ") thread not within thread list";
1097       return false;
1098     }
1099     thread->DecrementSuspendCount(self, /*for_user_code=*/(reason == SuspendReason::kForUserCode));
1100     Thread::resume_cond_->Broadcast(self);
1101   }
1102 
1103   VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") finished waking others";
1104   return true;
1105 }
1106 
1107 bool ThreadList::SuspendThread(Thread* self,
1108                                Thread* thread,
1109                                SuspendReason reason,
1110                                ThreadState self_state,
1111                                const char* func_name,
1112                                int attempt_of_4) {
1113   bool is_suspended = false;
1114   VLOG(threads) << func_name << " starting";
1115   pid_t tid = thread->GetTid();
1116   uint8_t suspended_count;
1117   uint8_t checkpoint_count;
1118   WrappedSuspend1Barrier wrapped_barrier{};
1119   static_assert(sizeof wrapped_barrier.barrier_ == sizeof(uint32_t));
1120   ThreadExitFlag tef;
1121   bool exited = false;
1122   thread->NotifyOnThreadExit(&tef);
1123   int iter_count = 1;
1124   do {
1125     {
1126       Locks::mutator_lock_->AssertSharedHeld(self);
1127       Locks::thread_list_lock_->AssertHeld(self);
1128       // Note: this will transition to runnable and potentially suspend.
1129       DCHECK(Contains(thread));
1130       // This implementation fails if thread == self. Let the clients handle that case
1131       // appropriately.
1132       CHECK_NE(thread, self) << func_name << "(self)";
1133       VLOG(threads) << func_name << " suspending: " << *thread;
1134       {
1135         MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1136         if (LIKELY(self->GetSuspendCount() == 0)) {
1137           suspended_count = thread->suspended_count_;
1138           checkpoint_count = thread->checkpoint_count_;
1139           thread->IncrementSuspendCount(self, nullptr, &wrapped_barrier, reason);
1140           if (thread->IsSuspended()) {
1141             // See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
1142             thread->RemoveFirstSuspend1Barrier(&wrapped_barrier);
1143             // PassActiveSuspendBarriers couldn't have seen our barrier, since it also acquires
1144             // 'thread_suspend_count_lock_'. `wrapped_barrier` will not be accessed.
1145             if (!thread->HasActiveSuspendBarrier()) {
1146               thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1147             }
1148             is_suspended = true;
1149           }
1150           DCHECK_GT(thread->GetSuspendCount(), 0);
1151           break;
1152         }
1153         // Else we hold the suspend count lock but another thread is trying to suspend us,
1154         // making it unsafe to try to suspend another thread in case we get a cycle.
1155         // Start the loop again, which will allow this thread to be suspended.
1156       }
1157     }
1158     // All locks are released, and we should quickly exit the suspend-unfriendly state. Retry.
1159     if (iter_count >= kMaxSuspendRetries) {
1160       LOG(FATAL) << "Too many suspend retries";
1161     }
1162     Locks::thread_list_lock_->ExclusiveUnlock(self);
1163     {
1164       ScopedThreadSuspension sts(self, ThreadState::kSuspended);
1165       usleep(kThreadSuspendSleepUs);
1166       ++iter_count;
1167     }
1168     Locks::thread_list_lock_->ExclusiveLock(self);
1169     exited = tef.HasExited();
1170   } while (!exited);
1171   thread->UnregisterThreadExitFlag(&tef);
1172   Locks::thread_list_lock_->ExclusiveUnlock(self);
1173   self->TransitionFromRunnableToSuspended(self_state);
1174   if (exited) {
1175     // This is OK: There's a race in inflating a lock and the owner giving up ownership and then
1176     // dying.
1177     LOG(WARNING) << StringPrintf("Thread with tid %d exited before suspending", tid);
1178     return false;
1179   }
1180   // Now wait for target to decrement suspend barrier.
1181   std::optional<std::string> failure_info;
1182   if (!is_suspended) {
1183     failure_info = WaitForSuspendBarrier(&wrapped_barrier.barrier_, tid, attempt_of_4);
1184     if (!failure_info.has_value()) {
1185       is_suspended = true;
1186     }
1187   }
1188   while (!is_suspended) {
1189     if (attempt_of_4 > 0 && attempt_of_4 < 4) {
1190       // Caller will try again. Give up and resume the thread for now.  We need to make sure
1191       // that wrapped_barrier is removed from the list before we deallocate it.
1192       MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1193       if (wrapped_barrier.barrier_.load() == 0) {
1194         // Succeeded in the meantime.
1195         is_suspended = true;
1196         continue;
1197       }
1198       thread->RemoveSuspend1Barrier(&wrapped_barrier);
1199       if (!thread->HasActiveSuspendBarrier()) {
1200         thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1201       }
1202       // Do not call Resume(), since we are probably not fully suspended.
1203       thread->DecrementSuspendCount(self,
1204                                     /*for_user_code=*/(reason == SuspendReason::kForUserCode));
1205       Thread::resume_cond_->Broadcast(self);
1206       return false;
1207     }
1208     std::string name;
1209     thread->GetThreadName(name);
1210     WrappedSuspend1Barrier* first_barrier;
1211     {
1212       MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1213       first_barrier = thread->tlsPtr_.active_suspend1_barriers;
1214     }
1215     // 'thread' should still have a suspend request pending, and hence stick around. Try to abort
1216     // there, since its stack trace is much more interesting than ours.
1217     std::string message = StringPrintf(
1218         "%s timed out: %s: state&flags: 0x%x, Java/native priority: %d/%d,"
1219         " barriers: %p, ours: %p, barrier value: %d, nsusps: %d, ncheckpts: %d, thread_info: %s",
1220         func_name,
1221         name.c_str(),
1222         thread->GetStateAndFlags(std::memory_order_relaxed).GetValue(),
1223         thread->GetNativePriority(),
1224         getpriority(PRIO_PROCESS /* really thread */, thread->GetTid()),
1225         first_barrier,
1226         &wrapped_barrier,
1227         wrapped_barrier.barrier_.load(),
1228         thread->suspended_count_ - suspended_count,
1229         thread->checkpoint_count_ - checkpoint_count,
1230         failure_info.value().c_str());
1231     // Check one last time whether thread passed the suspend barrier. Empirically this seems to
1232     // happen maybe between 1 and 5% of the time.
1233     if (wrapped_barrier.barrier_.load() != 0) {
1234       // thread still has a pointer to wrapped_barrier. Returning and continuing would be unsafe
1235       // without additional cleanup.
1236       thread->AbortInThis(message);
1237       UNREACHABLE();
1238     }
1239     is_suspended = true;
1240   }
1241   // wrapped_barrier.barrier_ will no longer be accessed.
1242   VLOG(threads) << func_name << " suspended: " << *thread;
1243   if (ATraceEnabled()) {
1244     std::string name;
1245     thread->GetThreadName(name);
1246     ATraceBegin(
1247         StringPrintf("%s suspended %s for tid=%d", func_name, name.c_str(), thread->GetTid())
1248             .c_str());
1249   }
1250   if (kIsDebugBuild) {
1251     CHECK(thread->IsSuspended());
1252     MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1253     thread->CheckBarrierInactive(&wrapped_barrier);
1254   }
1255   return true;
1256 }
1257 
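// Illustrative (hypothetical) caller sketch for the SuspendThreadBy* entry points below. A
// successful suspension is normally paired with a later ThreadList::Resume() using the same
// SuspendReason:
//
//   Thread* target = thread_list->SuspendThreadByThreadId(tid, SuspendReason::kInternal,
//                                                         /*attempt_of_4=*/0);
//   if (target != nullptr) {
//     // Inspect the suspended thread here.
//     bool resumed = thread_list->Resume(target, SuspendReason::kInternal);
//     DCHECK(resumed);
//   }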
1258 Thread* ThreadList::SuspendThreadByPeer(jobject peer, SuspendReason reason) {
1259   Thread* const self = Thread::Current();
1260   ThreadState old_self_state = self->GetState();
1261   self->TransitionFromSuspendedToRunnable();
1262   Locks::thread_list_lock_->ExclusiveLock(self);
1263   ObjPtr<mirror::Object> thread_ptr = self->DecodeJObject(peer);
1264   Thread* thread = Thread::FromManagedThread(self, thread_ptr);
1265   if (thread == nullptr || !Contains(thread)) {
1266     if (thread == nullptr) {
1267       ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(thread_ptr);
1268       std::string thr_name = (name == nullptr ? "<unknown>" : name->AsString()->ToModifiedUtf8());
1269       LOG(WARNING) << "No such thread for suspend"
1270                    << ": " << peer << ":" << thr_name;
1271     } else {
1272       LOG(WARNING) << "SuspendThreadByPeer failed for unattached thread: "
1273                    << reinterpret_cast<void*>(thread);
1274     }
1275     Locks::thread_list_lock_->ExclusiveUnlock(self);
1276     self->TransitionFromRunnableToSuspended(old_self_state);
1277     return nullptr;
1278   }
1279   VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
1280   // Releases thread_list_lock_ and mutator lock.
1281   bool success = SuspendThread(self, thread, reason, old_self_state, __func__, 0);
1282   Locks::thread_list_lock_->AssertNotHeld(self);
1283   return success ? thread : nullptr;
1284 }
1285 
1286 Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
1287                                             SuspendReason reason,
1288                                             int attempt_of_4) {
1289   Thread* const self = Thread::Current();
1290   ThreadState old_self_state = self->GetState();
1291   CHECK_NE(thread_id, kInvalidThreadId);
1292   VLOG(threads) << "SuspendThreadByThreadId starting";
1293   self->TransitionFromSuspendedToRunnable();
1294   Locks::thread_list_lock_->ExclusiveLock(self);
1295   Thread* thread = FindThreadByThreadId(thread_id);
1296   if (thread == nullptr) {
1297     // There's a race between inflating a lock and the owner giving up ownership and then dying.
1298     LOG(WARNING) << StringPrintf("No such thread id %d for suspend", thread_id);
1299     Locks::thread_list_lock_->ExclusiveUnlock(self);
1300     self->TransitionFromRunnableToSuspended(old_self_state);
1301     return nullptr;
1302   }
1303   DCHECK(Contains(thread));
1304   VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
1305   // Releases thread_list_lock_ and mutator lock.
1306   bool success = SuspendThread(self, thread, reason, old_self_state, __func__, attempt_of_4);
1307   Locks::thread_list_lock_->AssertNotHeld(self);
1308   return success ? thread : nullptr;
1309 }
1310 
1311 Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
1312   for (const auto& thread : list_) {
1313     if (thread->GetThreadId() == thread_id) {
1314       return thread;
1315     }
1316   }
1317   return nullptr;
1318 }
1319 
1320 Thread* ThreadList::FindThreadByTid(int tid) {
1321   for (const auto& thread : list_) {
1322     if (thread->GetTid() == tid) {
1323       return thread;
1324     }
1325   }
1326   return nullptr;
1327 }
1328 
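// Waits until the only registered threads left are daemons (and the calling thread) and no thread
// is in the middle of unregistering. With check_no_birth we additionally insist that the runtime
// is already shutting down, so no new threads can be in the process of being born; otherwise we
// poll until NumberOfThreadsBeingBorn() drops to zero.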
1329 void ThreadList::WaitForOtherNonDaemonThreadsToExit(bool check_no_birth) {
1330   ScopedTrace trace(__PRETTY_FUNCTION__);
1331   Thread* self = Thread::Current();
1332   Locks::mutator_lock_->AssertNotHeld(self);
1333   while (true) {
1334     Locks::runtime_shutdown_lock_->Lock(self);
1335     if (check_no_birth) {
1336       // No more threads can be born after we start to shut down.
1337       CHECK(Runtime::Current()->IsShuttingDownLocked());
1338       CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
1339     } else {
1340       if (Runtime::Current()->NumberOfThreadsBeingBorn() != 0U) {
1341         // Awkward. shutdown_cond_ is private, but the only live thread may not be registered yet.
1342         // Fortunately, this is mostly used for testing and is not performance-critical.
1343         Locks::runtime_shutdown_lock_->Unlock(self);
1344         usleep(1000);
1345         continue;
1346       }
1347     }
1348     MutexLock mu(self, *Locks::thread_list_lock_);
1349     Locks::runtime_shutdown_lock_->Unlock(self);
1350     // Also wait for any threads that are unregistering to finish. This is required so that no
1351     // threads access the thread list after it is deleted. TODO: This may not work for user daemon
1352     // threads since they could unregister at the wrong time.
1353     bool done = unregistering_count_ == 0;
1354     if (done) {
1355       for (const auto& thread : list_) {
1356         if (thread != self && !thread->IsDaemon()) {
1357           done = false;
1358           break;
1359         }
1360       }
1361     }
1362     if (done) {
1363       break;
1364     }
1365     // Wait for another thread to exit before re-checking.
1366     Locks::thread_exit_cond_->Wait(self);
1367   }
1368 }
1369 
1370 void ThreadList::SuspendAllDaemonThreadsForShutdown() {
1371   ScopedTrace trace(__PRETTY_FUNCTION__);
1372   Thread* self = Thread::Current();
1373   size_t daemons_left = 0;
1374   {
1375     // Tell all the daemons it's time to suspend.
1376     MutexLock mu(self, *Locks::thread_list_lock_);
1377     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1378     for (const auto& thread : list_) {
1379       // This is only run after all non-daemon threads have exited, so the remainder should all be
1380       // daemons.
1381       CHECK(thread->IsDaemon()) << *thread;
1382       if (thread != self) {
1383         thread->IncrementSuspendCount(self);
1384         ++daemons_left;
1385       }
1386       // We are shutting down the runtime, so set the JNI functions of all the JNIEnvs to be
1387       // the sleep-forever ones.
1388       thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
1389     }
1390   }
1391   if (daemons_left == 0) {
1392     // No threads left; safe to shut down.
1393     return;
1394   }
1395   // There is not a clean way to shut down if we have daemons left. We have no mechanism for
1396   // killing them and reclaiming thread stacks. We also have no mechanism for waiting until they
1397   // have truly finished touching the memory we are about to deallocate. We do the best we can with
1398   // timeouts.
1399   //
1400   // If we have any daemons left, wait until they are (a) suspended and (b) no longer at a point
1401   // where they are about to access runtime state while not in a runnable state. We attempt the
1402   // latter by simply waiting long enough for things to quiesce. Examples: code in Monitor, or a
1403   // thread waking up from a condition variable.
1404   //
1405   // Give the threads a chance to suspend, complaining if they're slow. (a)
1406   bool have_complained = false;
1407   static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
1408   static constexpr size_t kSleepMicroseconds = 1000;
1409   bool all_suspended = false;
1410   for (size_t i = 0; !all_suspended && i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
1411     bool found_running = false;
1412     {
1413       MutexLock mu(self, *Locks::thread_list_lock_);
1414       for (const auto& thread : list_) {
1415         if (thread != self && thread->GetState() == ThreadState::kRunnable) {
1416           if (!have_complained) {
1417             LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
1418             have_complained = true;
1419           }
1420           found_running = true;
1421         }
1422       }
1423     }
1424     if (found_running) {
1425       // Sleep briefly before checking again. Max total sleep time is kTimeoutMicroseconds.
1426       usleep(kSleepMicroseconds);
1427     } else {
1428       all_suspended = true;
1429     }
1430   }
1431   if (!all_suspended) {
1432     // We can get here if a daemon thread executed a fastnative native call, so that it
1433     // remained in runnable state, and then made a JNI call after we called
1434     // SetFunctionsToRuntimeShutdownFunctions(), causing it to permanently stay in a harmless
1435     // but runnable state. See b/147804269 .
1436     LOG(WARNING) << "timed out suspending all daemon threads";
1437   }
1438   // Assume all threads are either suspended or somehow wedged.
1439   // Wait again for all the now "suspended" threads to actually quiesce. (b)
1440   static constexpr size_t kDaemonSleepTime = 400'000;
1441   usleep(kDaemonSleepTime);
1442   std::list<Thread*> list_copy;
1443   {
1444     MutexLock mu(self, *Locks::thread_list_lock_);
1445     // Half-way through the wait, set the "runtime deleted" flag, causing any newly awoken
1446     // threads to immediately go back to sleep without touching memory. This prevents us from
1447     // touching deallocated memory, but it also prevents mutexes from getting released. Thus we
1448     // only do this once we're reasonably sure that no system mutexes are still held.
1449     for (const auto& thread : list_) {
1450       DCHECK(thread == self || !all_suspended || thread->GetState() != ThreadState::kRunnable);
1451       // In the !all_suspended case, the target is probably sleeping.
1452       thread->GetJniEnv()->SetRuntimeDeleted();
1453       // Possibly contended Mutex acquisitions are unsafe after this.
1454       // Releasing thread_list_lock_ is OK, since it can't block.
1455     }
1456   }
1457   // Finally wait for any threads woken before we set the "runtime deleted" flags to finish
1458   // touching memory.
1459   usleep(kDaemonSleepTime);
1460 #if defined(__has_feature)
1461 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
1462   // Sleep a bit longer with -fsanitize=address, since everything is slower.
1463   usleep(2 * kDaemonSleepTime);
1464 #endif
1465 #endif
1466   // At this point no threads should be touching our data structures anymore.
1467 }
1468 
1469 void ThreadList::Register(Thread* self) {
1470   DCHECK_EQ(self, Thread::Current());
1471   CHECK(!shut_down_);
1472 
1473   if (VLOG_IS_ON(threads)) {
1474     std::ostringstream oss;
1475     self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
1476     LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
1477   }
1478 
1479   // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
1480   // SuspendAll requests.
1481   MutexLock mu(self, *Locks::thread_list_lock_);
1482   MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1483   if (suspend_all_count_ == 1) {
1484     self->IncrementSuspendCount(self);
1485   } else {
1486     DCHECK_EQ(suspend_all_count_, 0);
1487   }
1488   CHECK(!Contains(self));
1489   list_.push_back(self);
1490   if (gUseReadBarrier) {
1491     gc::collector::ConcurrentCopying* const cc =
1492         Runtime::Current()->GetHeap()->ConcurrentCopyingCollector();
1493     // Initialize according to the state of the CC collector.
1494     self->SetIsGcMarkingAndUpdateEntrypoints(cc->IsMarking());
1495     if (cc->IsUsingReadBarrierEntrypoints()) {
1496       self->SetReadBarrierEntrypoints();
1497     }
1498     self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
1499   }
1500 }
1501 
1502 void ThreadList::Unregister(Thread* self, bool should_run_callbacks) {
1503   DCHECK_EQ(self, Thread::Current());
1504   CHECK_NE(self->GetState(), ThreadState::kRunnable);
1505   Locks::mutator_lock_->AssertNotHeld(self);
1506   if (self->tls32_.disable_thread_flip_count != 0) {
1507     LOG(FATAL) << "Incomplete PrimitiveArrayCritical section at exit: " << *self << " count = "
1508                << self->tls32_.disable_thread_flip_count;
1509   }
1510 
1511   VLOG(threads) << "ThreadList::Unregister() " << *self;
1512 
1513   {
1514     MutexLock mu(self, *Locks::thread_list_lock_);
1515     ++unregistering_count_;
1516   }
1517 
1518   // Any time-consuming destruction, plus anything that can call back into managed code or
1519   // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
1520   // causes the threads to join. It is important to do this after incrementing unregistering_count_
1521   // since we want the runtime to wait for the daemon threads to exit before deleting the thread
1522   // list.
1523   self->Destroy(should_run_callbacks);
1524 
1525   uint32_t thin_lock_id = self->GetThreadId();
1526   while (true) {
1527     // Remove and delete the Thread* while holding the thread_list_lock_ and
1528     // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
1529     // Note: deliberately not using a MutexLock that could hold a stale self pointer.
1530     {
1531       MutexLock mu(self, *Locks::thread_list_lock_);
1532       if (!Contains(self)) {
1533         std::string thread_name;
1534         self->GetThreadName(thread_name);
1535         std::ostringstream os;
1536         DumpNativeStack(os, GetTid(), "  native: ", nullptr);
1537         LOG(FATAL) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
1538         UNREACHABLE();
1539       } else {
1540         MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1541         Thread::StateAndFlags state_and_flags = self->GetStateAndFlags(std::memory_order_acquire);
1542         if (!state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction) &&
1543             !state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
1544           list_.remove(self);
1545           self->SignalExitFlags();
1546           break;
1547         }
1548       }
1549     }
1550     // If we are not suspended yet, sleep briefly to give other threads time to execute.
1551     // This is important if there are realtime threads. b/111277984
1552     usleep(1);
1553     // We failed to remove the thread due to a suspend request or the like, loop and try again.
1554   }
1555 
1556   // We flush the trace buffer in Thread::Destroy. We have to check again here because once
1557   // Thread::Destroy finishes, we wait for any active suspend requests to complete before
1558   // deleting the thread. If a new trace was started during that wait, we may have allocated the
1559   // trace buffer again. The buffer would only contain method entry events for the methods on the
1560   // stack of the exiting thread. Flushing these entries is not required, but we do need to
1561   // release the buffer. Ideally we should either not generate trace events for an exiting
1562   // thread, or use a mechanism for reporting the initial events on a trace start that does not
1563   // rely on a per-thread buffer. Neither approach is trivial to implement, so we settle for
1564   // releasing the buffer here.
1565   if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
1566     Trace::ReleaseThreadBuffer(self);
1567   }
1568   CHECK_EQ(self->GetMethodTraceBuffer(), nullptr) << Trace::GetDebugInformation();
1569   delete self;
1570 
1571   // Release the thread ID after the thread is finished and deleted to avoid cases where we can
1572   // temporarily have multiple threads with the same thread id. When this occurs, it causes
1573   // problems in FindThreadByThreadId / SuspendThreadByThreadId.
1574   ReleaseThreadId(nullptr, thin_lock_id);
1575 
1576   // Clear the TLS data, so that the underlying native thread is recognizably detached.
1577   // (It may wish to reattach later.)
1578 #ifdef __BIONIC__
1579   __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
1580 #else
1581   CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
1582   Thread::self_tls_ = nullptr;
1583 #endif
1584 
1585   // Signal that a thread just detached.
1586   MutexLock mu(nullptr, *Locks::thread_list_lock_);
1587   --unregistering_count_;
1588   Locks::thread_exit_cond_->Broadcast(nullptr);
1589 }
1590 
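// ForEach simply applies 'callback' to every registered thread. Illustrative (hypothetical) usage,
// assuming the caller already holds thread_list_lock_:
//
//   static void CountRunnable(Thread* t, void* arg) {
//     if (t->GetState() == ThreadState::kRunnable) {
//       ++*static_cast<size_t*>(arg);
//     }
//   }
//   size_t runnable = 0;
//   thread_list->ForEach(CountRunnable, &runnable);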
1591 void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
1592   for (const auto& thread : list_) {
1593     callback(thread, context);
1594   }
1595 }
1596 
1597 void ThreadList::WaitForUnregisterToComplete(Thread* self) {
1598   // We hold thread_list_lock_.
1599   while (unregistering_count_ != 0) {
1600     LOG(WARNING) << "Waiting for a thread to finish unregistering";
1601     Locks::thread_exit_cond_->Wait(self);
1602   }
1603 }
1604 
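// Only visits the roots of threads that are either the caller or already suspended. Threads that
// are still running merely have their suspend count bumped and immediately restored, and are
// skipped rather than waited for.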
1605 void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
1606   Thread* const self = Thread::Current();
1607   std::vector<Thread*> threads_to_visit;
1608 
1609   // Tell threads to suspend and copy them into list.
1610   {
1611     MutexLock mu(self, *Locks::thread_list_lock_);
1612     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1613     for (Thread* thread : list_) {
1614       thread->IncrementSuspendCount(self);
1615       if (thread == self || thread->IsSuspended()) {
1616         threads_to_visit.push_back(thread);
1617       } else {
1618         thread->DecrementSuspendCount(self);
1619       }
1620     }
1621   }
1622 
1623   // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
1624   // order violations.
1625   for (Thread* thread : threads_to_visit) {
1626     thread->VisitRoots(visitor, kVisitRootFlagAllRoots);
1627   }
1628 
1629   // Restore suspend counts.
1630   {
1631     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1632     for (Thread* thread : threads_to_visit) {
1633       thread->DecrementSuspendCount(self);
1634     }
1635     Thread::resume_cond_->Broadcast(self);
1636   }
1637 }
1638 
1639 void ThreadList::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const {
1640   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
1641   for (const auto& thread : list_) {
1642     thread->VisitRoots(visitor, flags);
1643   }
1644 }
1645 
1646 void ThreadList::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const {
1647   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
1648   for (const auto& thread : list_) {
1649     thread->VisitReflectiveTargets(visitor);
1650   }
1651 }
1652 
1653 void ThreadList::SweepInterpreterCaches(IsMarkedVisitor* visitor) const {
1654   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
1655   for (const auto& thread : list_) {
1656     thread->SweepInterpreterCache(visitor);
1657   }
1658 }
1659 
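// Thin-lock thread ids are 1-based: id i + 1 is backed by bit i of allocated_ids_, with id 0
// reserved to mean "invalid". For example, the first id handed out is 1, stored as bit 0, and
// ReleaseThreadId(self, 1) clears that same bit.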
1660 uint32_t ThreadList::AllocThreadId(Thread* self) {
1661   MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
1662   for (size_t i = 0; i < allocated_ids_.size(); ++i) {
1663     if (!allocated_ids_[i]) {
1664       allocated_ids_.set(i);
1665       return i + 1;  // Zero is reserved to mean "invalid".
1666     }
1667   }
1668   LOG(FATAL) << "Out of internal thread ids";
1669   UNREACHABLE();
1670 }
1671 
1672 void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
1673   MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
1674   --id;  // Zero is reserved to mean "invalid".
1675   DCHECK(allocated_ids_[id]) << id;
1676   allocated_ids_.reset(id);
1677 }
1678 
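// Illustrative (hypothetical) use of ScopedSuspendAll as an RAII guard; all other threads are
// suspended for the lifetime of the scope and resumed when it is destroyed:
//
//   {
//     ScopedSuspendAll ssa(__FUNCTION__, /*long_suspend=*/false);
//     // Walk runtime data structures that must not be mutated concurrently.
//   }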
1679 ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
1680   Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
1681 }
1682 
1683 ScopedSuspendAll::~ScopedSuspendAll() {
1684   Runtime::Current()->GetThreadList()->ResumeAll();
1685 }
1686 
1687 }  // namespace art
1688