xref: /aosp_15_r20/art/runtime/thread.h (revision 795d594fd825385562da6b089ea9b2033f3abf5a)
1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/bit_field.h"
30 #include "base/bit_utils.h"
31 #include "base/locks.h"
32 #include "base/macros.h"
33 #include "base/pointer_size.h"
34 #include "base/safe_map.h"
35 #include "base/value_object.h"
36 #include "entrypoints/jni/jni_entrypoints.h"
37 #include "entrypoints/quick/quick_entrypoints.h"
38 #include "handle.h"
39 #include "handle_scope.h"
40 #include "interpreter/interpreter_cache.h"
41 #include "interpreter/shadow_frame.h"
42 #include "javaheapprof/javaheapsampler.h"
43 #include "jvalue.h"
44 #include "managed_stack.h"
45 #include "offsets.h"
46 #include "read_barrier_config.h"
47 #include "reflective_handle_scope.h"
48 #include "runtime_globals.h"
49 #include "runtime_stats.h"
50 #include "suspend_reason.h"
51 #include "thread_state.h"
52 
53 namespace unwindstack {
54 class AndroidLocalUnwinder;
55 }  // namespace unwindstack
56 
57 namespace art HIDDEN {
58 
59 namespace gc {
60 namespace accounting {
61 template<class T> class AtomicStack;
62 }  // namespace accounting
63 namespace collector {
64 class SemiSpace;
65 }  // namespace collector
66 }  // namespace gc
67 
68 namespace instrumentation {
69 struct InstrumentationStackFrame;
70 }  // namespace instrumentation
71 
72 namespace mirror {
73 class Array;
74 class Class;
75 class ClassLoader;
76 class Object;
77 template<class T> class ObjectArray;
78 template<class T> class PrimitiveArray;
79 using IntArray = PrimitiveArray<int32_t>;
80 class StackTraceElement;
81 class String;
82 class Throwable;
83 }  // namespace mirror
84 
85 namespace verifier {
86 class VerifierDeps;
87 }  // namespace verifier
88 
89 class ArtMethod;
90 class BaseMutex;
91 class ClassLinker;
92 class Closure;
93 class Context;
94 class DeoptimizationContextRecord;
95 class DexFile;
96 class FrameIdToShadowFrame;
97 class IsMarkedVisitor;
98 class JavaVMExt;
99 class JNIEnvExt;
100 class Monitor;
101 class RootVisitor;
102 class ScopedObjectAccessAlreadyRunnable;
103 class ShadowFrame;
104 class StackedShadowFrameRecord;
105 class Thread;
106 class ThreadList;
107 enum VisitRootFlags : uint8_t;
108 
109 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
110 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
111 // on.
112 class TLSData {
113  public:
114   virtual ~TLSData() {}
115 };
116 
117 // Thread priorities. These must match the Thread.MIN_PRIORITY,
118 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
119 enum ThreadPriority {
120   kMinThreadPriority = 1,
121   kNormThreadPriority = 5,
122   kMaxThreadPriority = 10,
123 };
124 
125 enum class ThreadFlag : uint32_t {
126   // If set, implies that suspend_count_ > 0 and the Thread should enter the safepoint handler.
127   kSuspendRequest = 1u << 0,
128 
129   // Request that the thread do some checkpoint work and then continue.
130   kCheckpointRequest = 1u << 1,
131 
132   // Request that the thread do an empty checkpoint and then continue.
133   kEmptyCheckpointRequest = 1u << 2,
134 
135   // Register that at least one suspend barrier needs to be passed.
136   // Changes to this flag are guarded by suspend_count_lock_.
137   kActiveSuspendBarrier = 1u << 3,
138 
139   // Marks that a "flip function" needs to be executed on this thread.
140   // Set only while holding thread_list_lock_.
141   kPendingFlipFunction = 1u << 4,
142 
143   // Marks that the "flip function" is being executed by another thread.
144   //
145   // This is used to guard against multiple threads trying to run the
146   // "flip function" for the same thread while the thread is suspended.
147   //
148   // Set only when we have some way to ensure that the thread cannot disappear out from under
149   // us; it is set either:
150   //   1) by the thread itself,
151   //   2) by a thread holding thread_list_lock_, or
152   //   3) while the target has a pending suspension request.
153   // Once set, it prevents the thread from exiting.
154   kRunningFlipFunction = 1u << 5,
155 
156   // We are responsible for resuming all other threads. We ignore suspension requests,
157   // but not checkpoint requests, until a more opportune time. GC code should
158   // in any case not check for such requests; other clients of SuspendAll might.
159   // Prevents a situation in which we are asked to suspend just before we suspend all
160   // other threads, and then notice the suspension request and suspend ourselves,
161   // leading to deadlock. Guarded by suspend_count_lock_.
162   // Should not ever be set when we try to transition to kRunnable.
163   // TODO(b/296639267): Generalize use to prevent SuspendAll from blocking
164   // in-progress GC.
165   kSuspensionImmune = 1u << 6,
166 
167   // Request that compiled JNI stubs do not transition to Native or Runnable with
168   // inlined code, but take a slow path for monitoring method entry and exit events.
169   kMonitorJniEntryExit = 1u << 7,
170 
171   // Indicates the last flag. Used for checking that the flags do not overlap thread state.
172   kLastFlag = kMonitorJniEntryExit
173 };
174 
175 enum class StackedShadowFrameType {
176   kShadowFrameUnderConstruction,
177   kDeoptimizationShadowFrame,
178 };
179 
180 // The type of method that triggers deoptimization. It contains info on whether
181 // the deoptimized method should advance dex_pc.
182 enum class DeoptimizationMethodType {
183   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
184   kDefault     // dex pc may or may not advance depending on other conditions.
185 };
186 
187 // For the CC collector, normal weak reference access can be disabled on a per-thread basis while
188 // processing references.  After finishing, the reference processor asynchronously sets the
189 // per-thread flags back to kEnabled with release memory ordering semantics. Each mutator thread
190 // should check its flag with acquire semantics before assuming that it is enabled. However,
191 // that is often too expensive, so the reading thread sets it to kVisiblyEnabled after seeing it
192 // kEnabled.  The Reference.get() intrinsic can thus read it in relaxed mode, and reread (by
193 // resorting to the slow path) with acquire semantics if it sees a value of kEnabled rather than
194 // kVisiblyEnabled.
195 enum class WeakRefAccessState : int32_t {
196   kVisiblyEnabled = 0,  // Enabled, and previously read with acquire load by this thread.
197   kEnabled,
198   kDisabled
199 };
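// Illustrative sketch (not part of this header) of the read protocol described above. The
// variable below is a hypothetical stand-in for the per-thread flag; it only shows the
// relaxed-read / acquire-reread pattern:
//
//   std::atomic<WeakRefAccessState> state;  // Hypothetical stand-in for the per-thread field.
//   WeakRefAccessState s = state.load(std::memory_order_relaxed);
//   if (s == WeakRefAccessState::kEnabled) {
//     // Reread with acquire to pair with the reference processor's release store, then cache
//     // the result so later reads can stay relaxed.
//     if (state.load(std::memory_order_acquire) == WeakRefAccessState::kEnabled) {
//       state.store(WeakRefAccessState::kVisiblyEnabled, std::memory_order_relaxed);
//     }
//   }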
200 
201 // ART uses two types of ABI/code: quick and native.
202 //
203 // Quick code includes:
204 // - The code that ART compiles to, e.g: Java/dex code compiled to Arm64.
205 // - Quick assembly entrypoints.
206 //
207 // Native code includes:
208 // - Interpreter.
209 // - GC.
210 // - JNI.
211 // - Runtime methods, i.e.: all ART C++ code.
212 //
213 // In regular (non-simulator) mode, both native and quick code are of the same ISA and will operate
214 // on the hardware stack. The hardware stack is allocated by the kernel to ART and grows down in
215 // memory.
216 //
217 // In simulator mode, native and quick code use different ISAs and will use different stacks.
218 // Native code will use the hardware stack while quick code will use the simulated stack. The
219 // simulated stack is a simple buffer in the native heap owned by the Simulator class.
220 //
221 // The StackType enum reflects the underlying type of stack in use by any given function while two
222 // constexpr StackTypes (kNativeStackType and kQuickStackType) indicate which type of stack is used
223 // for native and quick code. Whenever possible kNativeStackType and kQuickStackType should be used
224 // instead of using the StackType directly.
225 enum class StackType {
226   kHardware,
227   kSimulated
228 };
229 
230 // The type of stack used when executing native code, i.e.: runtime helpers, interpreter, JNI, etc.
231 // This stack is the native machine's call stack and so should be used when comparing against
232 // values returned from builtin functions such as __builtin_frame_address.
233 static constexpr StackType kNativeStackType = StackType::kHardware;
234 
235 // The type of stack used when executing quick code, i.e.: compiled dex code and quick entrypoints.
236 // For simulator builds this is the kSimulated stack and for non-simulator builds this is the
237 // kHardware stack.
238 static constexpr StackType kQuickStackType = StackType::kHardware;
239 
240 // See Thread.tlsPtr_.active_suspend1_barriers below for explanation.
241 struct WrappedSuspend1Barrier {
242   // TODO(b/323668816): At least weaken CHECKs to DCHECKs once the bug is fixed.
243   static constexpr int kMagic = 0xba8;
244   WrappedSuspend1Barrier() : magic_(kMagic), barrier_(1), next_(nullptr) {}
245   int magic_;
246   AtomicInteger barrier_;
247   struct WrappedSuspend1Barrier* next_ GUARDED_BY(Locks::thread_suspend_count_lock_);
248 };
249 
250 // Mostly opaque structure allocated by the client of NotifyOnThreadExit.  Allows a client to
251 // check whether the thread still exists after temporarily releasing thread_list_lock_, usually
252 // because we need to wait for something.
253 class ThreadExitFlag {
254  public:
255   ThreadExitFlag() : exited_(false) {}
256   bool HasExited() REQUIRES(Locks::thread_list_lock_) { return exited_; }
257 
258  private:
259   // All ThreadExitFlags associated with a thread and with exited_ == false are in a doubly linked
260   // list.  tlsPtr_.thread_exit_flags points to the first element.  first.prev_ and last.next_ are
261   // null. This list contains no ThreadExitFlags with exited_ == true.
262   ThreadExitFlag* next_ GUARDED_BY(Locks::thread_list_lock_);
263   ThreadExitFlag* prev_ GUARDED_BY(Locks::thread_list_lock_);
264   bool exited_ GUARDED_BY(Locks::thread_list_lock_);
265   friend class Thread;
266 };
267 
268 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
269 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
270 
271 static constexpr size_t kSharedMethodHotnessThreshold = 0x1fff;
272 
273 // Thread's stack layout for implicit stack overflow checks:
274 //
275 //   +---------------------+  <- highest address of stack memory
276 //   |                     |
277 //   .                     .  <- SP
278 //   |                     |
279 //   |                     |
280 //   +---------------------+  <- stack_end
281 //   |                     |
282 //   |  Gap                |
283 //   |                     |
284 //   +---------------------+  <- stack_begin
285 //   |                     |
286 //   | Protected region    |
287 //   |                     |
288 //   +---------------------+  <- lowest address of stack memory
289 //
290 // The stack always grows down in memory.  At the lowest address is a region of memory
291 // that is protected with mprotect(PROT_NONE).  Any attempt to read/write to this region will
292 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
293 // between the stack_end and the highest address in stack memory.  An implicit stack
294 // overflow check is a read of memory at a certain offset below the current SP (8K typically).
295 // If the thread's SP is below the stack_end address this will be a read into the protected
296 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
297 // at least 8K of space.  Because stack overflow checks are only performed in generated code,
298 // if the thread makes a call out to a native function (through JNI), that native function
299 // might only have 4K of memory (if the SP is adjacent to stack_end).
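// Illustrative sketch (not part of this header): an implicit stack overflow check is
// conceptually just a load at a fixed offset below SP, which faults in the protected region
// when the stack is exhausted. The names below are hypothetical:
//
//   volatile uint8_t* probe = current_sp - kStackOverflowImplicitCheckOffset;
//   (void)*probe;  // A SIGSEGV here is turned into a StackOverflowError by the fault handler.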
300 
301 class EXPORT Thread {
302  public:
303   static const size_t kStackOverflowImplicitCheckSize;
304   static constexpr bool kVerifyStack = kIsDebugBuild;
305 
306   // Creates a new native thread corresponding to the given managed peer.
307   // Used to implement Thread.start.
308   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
309 
310   // Attaches the calling native thread to the runtime, returning the new native peer.
311   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
312   static Thread* Attach(const char* thread_name,
313                         bool as_daemon,
314                         jobject thread_group,
315                         bool create_peer,
316                         bool should_run_callbacks);
317   // Attaches the calling native thread to the runtime, returning the new native peer.
318   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
319 
320   // Reset internal state of child thread after fork.
321   void InitAfterFork();
322 
323   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
324   // high cost and so we favor passing self around when possible.
325   // TODO: mark as PURE so the compiler may coalesce and remove?
326   static Thread* Current();
327 
328   // Get the thread from the JNI environment.
329   static Thread* ForEnv(JNIEnv* env);
330 
331   // For implicit overflow checks we reserve an extra piece of memory at the bottom of the stack
332   // (lowest memory). The higher portion of the memory is protected against reads and the lower is
333   // available for use while throwing the StackOverflow exception.
334   ALWAYS_INLINE static size_t GetStackOverflowProtectedSize();
335 
336   // On a runnable thread, check for pending thread suspension request and handle if pending.
337   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
338 
339   // Process pending thread suspension request and handle if pending.
340   void CheckSuspend(bool implicit = false) REQUIRES_SHARED(Locks::mutator_lock_);
341 
342   // Process a pending empty checkpoint if pending.
343   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
344   void CheckEmptyCheckpointFromMutex();
345 
346   static Thread* FromManagedThread(Thread* self, ObjPtr<mirror::Object> thread_peer)
347       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
348       REQUIRES_SHARED(Locks::mutator_lock_);
349   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
350       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
351       REQUIRES_SHARED(Locks::mutator_lock_);
352 
353   // Translates 172 to pAllocArrayFromCode and so on.
354   template<PointerSize size_of_pointers>
355   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
356 
357   // Dumps a one-line summary of thread state (used for operator<<).
358   void ShortDump(std::ostream& os) const;
359 
360   // Order of threads for ANRs (ANRs can be trimmed, so we print important ones first).
361   enum class DumpOrder : uint8_t {
362     kMain,     // Always print the main thread first (there might not be one).
363     kBlocked,  // Then print all threads that are blocked due to waiting on a lock.
364     kLocked,   // Then print all threads that are holding some lock already.
365     kDefault,  // Print all other threads which might not be interesting for ANR.
366   };
367 
368   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
369   DumpOrder Dump(std::ostream& os,
370                  bool dump_native_stack = true,
371                  bool force_dump_stack = false) const
372       REQUIRES_SHARED(Locks::mutator_lock_);
373   DumpOrder Dump(std::ostream& os,
374                  unwindstack::AndroidLocalUnwinder& unwinder,
375                  bool dump_native_stack = true,
376                  bool force_dump_stack = false) const
377       REQUIRES_SHARED(Locks::mutator_lock_);
378 
379   DumpOrder DumpJavaStack(std::ostream& os,
380                           bool check_suspended = true,
381                           bool dump_locks = true) const
382       REQUIRES_SHARED(Locks::mutator_lock_);
383 
384   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
385   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
386   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
387       REQUIRES_SHARED(Locks::mutator_lock_);
388 
389   ThreadState GetState() const {
390     return GetStateAndFlags(std::memory_order_relaxed).GetState();
391   }
392 
393   ThreadState SetState(ThreadState new_state);
394 
395   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
396     return tls32_.suspend_count;
397   }
398 
399   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
400                                                Locks::user_code_suspension_lock_) {
401     return tls32_.user_code_suspend_count;
402   }
403 
404   bool IsSuspended() const {
405     // We need to ensure that once we return true, all prior accesses to the Java data by "this"
406     // thread are complete. Hence we need "acquire" ordering here, and "release" when the flags
407     // are set.
408     StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_acquire);
409     return state_and_flags.GetState() != ThreadState::kRunnable &&
410            state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
411   }
412 
413   void DecrDefineClassCount() {
414     tls32_.define_class_counter--;
415   }
416 
417   void IncrDefineClassCount() {
418     tls32_.define_class_counter++;
419   }
420   uint32_t GetDefineClassCount() const {
421     return tls32_.define_class_counter;
422   }
423 
424   // Increment suspend count and optionally install at most one suspend barrier.
425   // Must hold thread_list_lock, OR be called with self == this, so that the Thread cannot
426   // disappear while we're running. If it's known that this == self, and thread_list_lock_
427   // is not held, FakeMutexLock should be used to fake-acquire thread_list_lock_ for
428   // static checking purposes.
429   ALWAYS_INLINE
430   void IncrementSuspendCount(Thread* self,
431                              AtomicInteger* suspendall_barrier,
432                              WrappedSuspend1Barrier* suspend1_barrier,
433                              SuspendReason reason) REQUIRES(Locks::thread_suspend_count_lock_)
434       REQUIRES(Locks::thread_list_lock_);
435 
436   // The same, but default reason to kInternal, and barriers to nullptr.
437   ALWAYS_INLINE void IncrementSuspendCount(Thread* self) REQUIRES(Locks::thread_suspend_count_lock_)
438       REQUIRES(Locks::thread_list_lock_);
439 
440   // Follows one of the above calls. for_user_code indicates whether the SuspendReason was kForUserCode.
441   // Generally will need to be closely followed by Thread::resume_cond_->Broadcast(self);
442   // since there may be waiters. DecrementSuspendCount() itself does not do this, since we often
443   // wake more than a single thread.
444   ALWAYS_INLINE void DecrementSuspendCount(Thread* self, bool for_user_code = false)
445       REQUIRES(Locks::thread_suspend_count_lock_);
446 
447  private:
448   NO_RETURN static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread);
449 
450  public:
451   // Requests a checkpoint closure to run on another thread. The closure will be run when the
452   // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
453   // originating from a compiler generated suspend point check. This returns true if the closure
454   // was added and will (eventually) be executed. It returns false if this was impossible
455   // because the thread was suspended, and we thus did nothing.
456   //
457   // Since multiple closures can be queued and some closures can delay other threads from running,
458   // no closure should attempt to suspend another thread while running.
459   // TODO We should add some debug option that verifies this.
460   //
461   // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
462   // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
463   // acquires it.
464   bool RequestCheckpoint(Closure* function)
465       REQUIRES(Locks::thread_suspend_count_lock_);
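  // Illustrative sketch (not part of this header) of queueing a checkpoint; DumpClosure is a
  // hypothetical Closure subclass, and the locking shown is what the REQUIRES annotation expects:
  //
  //   class DumpClosure : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       LOG(INFO) << *thread;  // Runs on (or on behalf of) the target thread.
  //     }
  //   };
  //
  //   DumpClosure closure;
  //   {
  //     MutexLock mu(self, *Locks::thread_suspend_count_lock_);
  //     if (!target->RequestCheckpoint(&closure)) {
  //       // The target was suspended; the closure was not queued and must be handled another way.
  //     }
  //   }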
466 
467   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
468   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
469   // execute the checkpoint for us if it is Runnable. The wait_state is the state that the thread
470   // will go into while it is awaiting the checkpoint to be run.
471   // The closure may be run on Thread::Current() on behalf of "this" thread.
472   // Thus for lock ordering purposes, the closure should be runnable by the caller. This also
473   // sometimes makes it reasonable to pass ThreadState::kRunnable as wait_state: We may wait on
474   // a condition variable for the "this" thread to act, but for lock ordering purposes, this is
475   // exactly as though Thread::Current() had run the closure.
476   // NB: Since multiple closures can be queued and some closures can delay other threads from running,
477   // no closure should attempt to suspend another thread while running.
478   bool RequestSynchronousCheckpoint(Closure* function,
479                                     ThreadState wait_state = ThreadState::kWaiting)
480       REQUIRES_SHARED(Locks::mutator_lock_) RELEASE(Locks::thread_list_lock_)
481           REQUIRES(!Locks::thread_suspend_count_lock_);
482 
483   bool RequestEmptyCheckpoint()
484       REQUIRES(Locks::thread_suspend_count_lock_);
485 
486   Closure* GetFlipFunction() { return tlsPtr_.flip_function.load(std::memory_order_relaxed); }
487 
488   // Set the flip function. This is done with all threads suspended, except for the calling thread.
489   void SetFlipFunction(Closure* function) REQUIRES(Locks::thread_suspend_count_lock_)
490       REQUIRES(Locks::thread_list_lock_);
491 
492   // Wait for the flip function to complete if still running on another thread. Assumes the "this"
493   // thread remains live.
494   void WaitForFlipFunction(Thread* self) const REQUIRES(!Locks::thread_suspend_count_lock_);
495 
496   // An enhanced version of the above that uses tef to safely return if the thread exited in the
497   // meantime.
498   void WaitForFlipFunctionTestingExited(Thread* self, ThreadExitFlag* tef)
499       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::thread_list_lock_);
500 
501   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
502     CHECK(gUseReadBarrier);
503     return tlsPtr_.thread_local_mark_stack;
504   }
505   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
506     CHECK(gUseReadBarrier);
507     tlsPtr_.thread_local_mark_stack = stack;
508   }
509 
510   uint8_t* GetThreadLocalGcBuffer() {
511     DCHECK(gUseUserfaultfd);
512     return tlsPtr_.thread_local_gc_buffer;
513   }
514   void SetThreadLocalGcBuffer(uint8_t* buf) {
515     DCHECK(gUseUserfaultfd);
516     tlsPtr_.thread_local_gc_buffer = buf;
517   }
518 
519   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
520   // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
521   // Should be called only when the kSuspensionImmune flag is clear. Requires this == Current();
522   void FullSuspendCheck(bool implicit = false)
523       REQUIRES(!Locks::thread_suspend_count_lock_)
524       REQUIRES_SHARED(Locks::mutator_lock_);
525 
526   // Transition from non-runnable to runnable state acquiring share on mutator_lock_. Returns the
527   // old state, or kInvalidState if we failed because allow_failure and kSuspensionImmune were set.
528   // Should not be called with an argument except by the next function below.
529   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable(bool fail_on_suspend_req = false)
530       REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
531 
532   // A version that does not return the old ThreadState, and fails by returning false if it would
533   // have needed to handle a pending suspension request.
534   ALWAYS_INLINE bool TryTransitionFromSuspendedToRunnable()
535       REQUIRES(!Locks::thread_suspend_count_lock_)
536       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
537     // The above function does not really acquire the lock when we pass true and it returns
538     // kInvalidState. We lie in both places, but clients see correct behavior.
539     return TransitionFromSuspendedToRunnable(true) != ThreadState::kInvalidState;
540   }
541 
542   // Transition from runnable into a state where mutator privileges are denied. Releases share of
543   // mutator lock.
544   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
545       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
546       UNLOCK_FUNCTION(Locks::mutator_lock_);
547 
548   // Once called, thread suspension will cause an assertion failure.
549   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
550     Roles::uninterruptible_.Acquire();  // No-op.
551     if (kIsDebugBuild) {
552       CHECK(cause != nullptr);
553       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
554       tls32_.no_thread_suspension++;
555       tlsPtr_.last_no_thread_suspension_cause = cause;
556       return previous_cause;
557     } else {
558       return nullptr;
559     }
560   }
561 
562   // End region where no thread suspension is expected.
563   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
564     if (kIsDebugBuild) {
565       CHECK_IMPLIES(old_cause == nullptr, tls32_.no_thread_suspension == 1);
566       CHECK_GT(tls32_.no_thread_suspension, 0U);
567       tls32_.no_thread_suspension--;
568       tlsPtr_.last_no_thread_suspension_cause = old_cause;
569     }
570     Roles::uninterruptible_.Release();  // No-op.
571   }
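  // Illustrative usage sketch (not part of this header): the two calls above bracket a region in
  // which suspension must not happen:
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Example: visiting roots");
  //   // ... work that must not suspend or transition out of the current state ...
  //   self->EndAssertNoThreadSuspension(old_cause);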
572 
573   // End region where no thread suspension is expected. Returns the current open region in case we
574   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
575   // is larger than one.
576   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
577     const char* ret = nullptr;
578     if (kIsDebugBuild) {
579       CHECK_EQ(tls32_.no_thread_suspension, 1u);
580       tls32_.no_thread_suspension--;
581       ret = tlsPtr_.last_no_thread_suspension_cause;
582       tlsPtr_.last_no_thread_suspension_cause = nullptr;
583     }
584     Roles::uninterruptible_.Release();  // No-op.
585     return ret;
586   }
587 
588   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
589 
590   void AssertNoTransactionCheckAllowed() const {
591     CHECK(tlsPtr_.last_no_transaction_checks_cause == nullptr)
592         << tlsPtr_.last_no_transaction_checks_cause;
593   }
594 
595   // Return true if thread suspension is allowable.
596   bool IsThreadSuspensionAllowable() const;
597 
598   bool IsDaemon() const {
599     return tls32_.daemon;
600   }
601 
602   size_t NumberOfHeldMutexes() const;
603 
604   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
605 
606   /*
607    * Changes the priority of this thread to match that of the java.lang.Thread object.
608    *
609    * We map a priority value from 1-10 to Linux "nice" values, where lower
610    * numbers indicate higher priority.
611    */
612   void SetNativePriority(int newPriority);
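  // Illustrative only (not the actual table, which lives in the platform-specific
  // implementation): the mapping described above has this general shape, with lower nice
  // values meaning higher scheduling priority:
  //   Java priority 1 (MIN)   -> a positive nice value (lowest scheduling priority)
  //   Java priority 5 (NORM)  -> roughly nice 0
  //   Java priority 10 (MAX)  -> a negative nice value (highest scheduling priority)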
613 
614   /*
615    * Returns the priority of this thread by querying the system.
616    * This is useful when attaching a thread through JNI.
617    *
618    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
619    */
620   int GetNativePriority() const;
621 
622   // Guaranteed to be non-zero.
623   uint32_t GetThreadId() const {
624     return tls32_.thin_lock_thread_id;
625   }
626 
627   pid_t GetTid() const {
628     return tls32_.tid;
629   }
630 
631   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
632   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
633 
634   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
635   // allocation, or locking.
636   void GetThreadName(std::string& name) const;
637 
638   // Sets the thread's name.
639   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
640 
641   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
642   uint64_t GetCpuMicroTime() const;
643 
644   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
645     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
646     CHECK(tlsPtr_.jpeer == nullptr);
647     return tlsPtr_.opeer;
648   }
649   // GetPeer is not safe if called on another thread in the middle of the thread flip: the
650   // thread's stack may not have been flipped yet, and its peer may be a from-space (stale) ref.
651   // This function will force a flip for the other thread if necessary.
652   // Since we hold a shared mutator lock, a new flip function cannot be concurrently installed.
653   // The target thread must be suspended, so that it cannot disappear during the call.
654   // We should ideally not hold thread_list_lock_. GetReferenceKind in ti_heap.cc currently does
655   // hold it, but in a context in which we do not invoke EnsureFlipFunctionStarted().
656   mirror::Object* GetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_);
657 
658   // A version of the above that requires thread_list_lock_, but does not require the thread to
659   // be suspended. This may temporarily release thread_list_lock_. It thus needs a ThreadExitFlag
660   // describing the thread's status, so we can tell if it exited in the interim. Returns null if
661   // the thread exited.
662   mirror::Object* LockedGetPeerFromOtherThread(ThreadExitFlag* tef)
663       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::thread_list_lock_);
664 
665   // A convenience version of the above that creates the ThreadExitFlag locally. This is often
666   // unsafe if more than one thread is being processed. A prior call may have released
667   // thread_list_lock_, and thus the NotifyOnThreadExit() call here could see a deallocated
668   // Thread. We must hold the thread_list_lock continuously between obtaining the Thread*
669   // and calling NotifyOnThreadExit().
670   mirror::Object* LockedGetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_)
671       REQUIRES(Locks::thread_list_lock_) {
672     ThreadExitFlag tef;
673     NotifyOnThreadExit(&tef);
674     mirror::Object* result = LockedGetPeerFromOtherThread(&tef);
675     UnregisterThreadExitFlag(&tef);
676     return result;
677   }
678 
679   bool HasPeer() const {
680     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
681   }
682 
683   RuntimeStats* GetStats() {
684     return &tls64_.stats;
685   }
686 
687   bool IsStillStarting() const;
688 
689   bool IsExceptionPending() const {
690     return tlsPtr_.exception != nullptr;
691   }
692 
693   bool IsAsyncExceptionPending() const {
694     return tlsPtr_.async_exception != nullptr;
695   }
696 
697   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
698     return tlsPtr_.exception;
699   }
700 
701   void AssertPendingException() const;
702   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
703   void AssertNoPendingException() const;
704   void AssertNoPendingExceptionForNewException(const char* msg) const;
705 
706   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
707 
708   // Set an exception that is asynchronously thrown from a different thread. This will be checked
709   // periodically and might overwrite the current 'Exception'. This can only be called from a
710   // checkpoint.
711   //
712   // The caller should also make sure that the thread has been deoptimized so that the exception
713   // could be detected on back-edges.
714   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
715       REQUIRES_SHARED(Locks::mutator_lock_);
716 
717   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
718     tlsPtr_.exception = nullptr;
719   }
720 
721   // Move the current async-exception to the main exception. This should be called when the current
722   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
723   // that needs to be dealt with, false otherwise.
724   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
725 
726   // Find catch block then prepare and return the long jump context to the appropriate exception
727   // handler. When is_method_exit_exception is true, the exception was thrown by the method exit
728   // callback and we should not send method unwind for the method on top of the stack since method
729   // exit callback was already called.
730   std::unique_ptr<Context> QuickDeliverException(bool is_method_exit_exception = false)
731       REQUIRES_SHARED(Locks::mutator_lock_);
732 
733   // Perform deoptimization. Return a `Context` prepared for a long jump.
734   std::unique_ptr<Context> Deoptimize(DeoptimizationKind kind,
735                                       bool single_frame,
736                                       bool skip_method_exit_callbacks)
737       REQUIRES_SHARED(Locks::mutator_lock_);
738 
739   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
740   // abort the runtime iff abort_on_error is true.
741   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
742                               bool check_suspended = true,
743                               bool abort_on_error = true) const
744       REQUIRES_SHARED(Locks::mutator_lock_);
745 
746   // Returns whether the given exception was thrown by the current Java method being executed
747   // (Note that this includes native Java methods).
748   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
749       REQUIRES_SHARED(Locks::mutator_lock_);
750 
751   void SetTopOfStack(ArtMethod** top_method) {
752     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
753   }
754 
755   void SetTopOfStackGenericJniTagged(ArtMethod** top_method) {
756     tlsPtr_.managed_stack.SetTopQuickFrameGenericJniTagged(top_method);
757   }
758 
759   void SetTopOfShadowStack(ShadowFrame* top) {
760     tlsPtr_.managed_stack.SetTopShadowFrame(top);
761   }
762 
763   bool HasManagedStack() const {
764     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
765   }
766 
767   // If 'msg' is null, no detail message is set.
768   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
769       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
770 
771   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
772   // used as the new exception's cause.
773   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
774       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
775 
776   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
777       __attribute__((format(printf, 3, 4)))
778       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
779 
780   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
781       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
782 
783   // OutOfMemoryError is special, because we need to pre-allocate an instance.
784   // Only the GC should call this.
785   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
786       REQUIRES(!Roles::uninterruptible_);
787 
788   static void Startup();
789   static void FinishStartup();
790   static void Shutdown();
791 
792   // Notify this thread's thread-group that this thread has started.
793   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
794   //       is null, the thread's thread-group is loaded from the peer.
795   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
796       REQUIRES_SHARED(Locks::mutator_lock_);
797 
798   // Request notification when this thread is unregistered, typically because it has exited.
799   //
800   // The ThreadExitFlag status is only changed when we remove the thread from the thread list,
801   // which we only do once no suspend requests are outstanding, and no flip-functions are still
802   // running.
803   //
804   // The caller must allocate a fresh ThreadExitFlag, and pass it in. The caller is responsible
805   // for either waiting until the thread has exited, or unregistering the ThreadExitFlag, and
806   // then, and only then, deallocating the ThreadExitFlag.  (This scheme avoids an allocation and
807   // questions about what to do if the allocation fails. Allows detection of thread exit after
808   // temporary release of thread_list_lock_)
809   void NotifyOnThreadExit(ThreadExitFlag* tef) REQUIRES(Locks::thread_list_lock_);
810   void UnregisterThreadExitFlag(ThreadExitFlag* tef) REQUIRES(Locks::thread_list_lock_);
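  // Illustrative sketch (not part of this header) of the registration protocol described above;
  // `target` is a hypothetical Thread* obtained while holding thread_list_lock_:
  //
  //   ThreadExitFlag tef;
  //   {
  //     MutexLock mu(self, *Locks::thread_list_lock_);
  //     target->NotifyOnThreadExit(&tef);
  //   }
  //   // thread_list_lock_ may be released and reacquired here while we wait for something.
  //   {
  //     MutexLock mu(self, *Locks::thread_list_lock_);
  //     if (!tef.HasExited()) {
  //       target->UnregisterThreadExitFlag(&tef);  // Safe: the thread is still registered.
  //     }
  //   }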
811 
812   // Is the ThreadExitFlag currently registered in this thread, which has not yet terminated?
813   // Intended only for testing.
814   bool IsRegistered(ThreadExitFlag* query_tef) REQUIRES(Locks::thread_list_lock_);
815 
816   // For debuggable builds, CHECK that neither first nor last, nor any ThreadExitFlag with an
817   // address in-between, is currently registered with any thread.
818   static void DCheckUnregisteredEverywhere(ThreadExitFlag* first, ThreadExitFlag* last)
819       REQUIRES(!Locks::thread_list_lock_);
820 
821   // Called when thread is unregistered. May be called repeatedly, in which case only newly
822   // registered clients are processed.
823   void SignalExitFlags() REQUIRES(Locks::thread_list_lock_);
824 
825   // JNI methods
826   JNIEnvExt* GetJniEnv() const {
827     return tlsPtr_.jni_env;
828   }
829 
830   // Convert a jobject into an Object*
831   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
832   // Checks if the weak global ref has been cleared by the GC without decoding it.
833   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
834 
835   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
836     return tlsPtr_.monitor_enter_object;
837   }
838 
839   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
840     tlsPtr_.monitor_enter_object = obj;
841   }
842 
843   // Implements java.lang.Thread.interrupted.
844   bool Interrupted();
845   // Implements java.lang.Thread.isInterrupted.
846   bool IsInterrupted();
847   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
848   void SetInterrupted(bool i) {
849     tls32_.interrupted.store(i, std::memory_order_seq_cst);
850   }
851   void Notify() REQUIRES(!wait_mutex_);
852 
853   ALWAYS_INLINE void PoisonObjectPointers() {
854     ++poison_object_cookie_;
855   }
856 
857   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
858 
859   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
860     return poison_object_cookie_;
861   }
862 
863   // Parking for 0ns of relative time means an untimed park. A negative time (though it
864   // should be handled in Java code) returns immediately.
865   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
866   void Unpark();
867 
868  private:
869   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
870 
871  public:
872   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
873     return wait_mutex_;
874   }
875 
876   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
877     return wait_cond_;
878   }
879 
880   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
881     return wait_monitor_;
882   }
883 
884   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
885     wait_monitor_ = mon;
886   }
887 
888   // Waiter linked-list support.
889   Thread* GetWaitNext() const {
890     return tlsPtr_.wait_next;
891   }
892 
893   void SetWaitNext(Thread* next) {
894     tlsPtr_.wait_next = next;
895   }
896 
897   jobject GetClassLoaderOverride() {
898     return tlsPtr_.class_loader_override;
899   }
900 
901   void SetClassLoaderOverride(jobject class_loader_override);
902 
903   // Create the internal representation of a stack trace, which is more time-
904   // and space-efficient to compute than the StackTraceElement[].
905   ObjPtr<mirror::ObjectArray<mirror::Object>> CreateInternalStackTrace(
906       const ScopedObjectAccessAlreadyRunnable& soa) const
907       REQUIRES_SHARED(Locks::mutator_lock_);
908 
909   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
910   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
911   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
912   // with the number of valid frames in the returned array.
913   static jobjectArray InternalStackTraceToStackTraceElementArray(
914       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
915       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
916       REQUIRES_SHARED(Locks::mutator_lock_);
917 
918   static jint InternalStackTraceToStackFrameInfoArray(
919       const ScopedObjectAccessAlreadyRunnable& soa,
920       jlong mode,  // See java.lang.StackStreamFactory for the mode flags
921       jobject internal,
922       jint startLevel,
923       jint batchSize,
924       jint startIndex,
925       jobjectArray output_array)  // java.lang.StackFrameInfo[]
926       REQUIRES_SHARED(Locks::mutator_lock_);
927 
928   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
929       REQUIRES_SHARED(Locks::mutator_lock_);
930 
931   bool HasDebuggerShadowFrames() const {
932     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
933   }
934 
935   // This is done by GC using a checkpoint (or in a stop-the-world pause).
936   void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
937 
938   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
939       REQUIRES_SHARED(Locks::mutator_lock_);
940 
941   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
942       REQUIRES(Locks::mutator_lock_);
943 
944   // Check that the thread state is valid. Try to fail if the thread has erroneously terminated.
945   // Note that once the thread has been terminated, it can also be deallocated.  But even if the
946   // thread state has been overwritten, the value is unlikely to be in the correct range.
947   void VerifyState() {
948     if (kIsDebugBuild) {
949       ThreadState state = GetState();
950       StateAndFlags::ValidateThreadState(state);
951       DCHECK_NE(state, ThreadState::kTerminated);
952     }
953   }
954 
955   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
956     if (kVerifyStack) {
957       VerifyStackImpl();
958     }
959   }
960 
961   //
962   // Offsets of various members of native Thread class, used by compiled code.
963   //
964 
965   template<PointerSize pointer_size>
966   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
967     return ThreadOffset<pointer_size>(
968         OFFSETOF_MEMBER(Thread, tls32_) +
969         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
970   }
971 
972   template<PointerSize pointer_size>
973   static constexpr ThreadOffset<pointer_size> TidOffset() {
974     return ThreadOffset<pointer_size>(
975         OFFSETOF_MEMBER(Thread, tls32_) +
976         OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
977   }
978 
979   template<PointerSize pointer_size>
980   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
981     return ThreadOffset<pointer_size>(
982         OFFSETOF_MEMBER(Thread, tls32_) +
983         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
984   }
985 
986   template<PointerSize pointer_size>
987   static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() {
988     return ThreadOffset<pointer_size>(
989         OFFSETOF_MEMBER(Thread, tls32_) +
990         OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled));
991   }
992 
993   template<PointerSize pointer_size>
994   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
995     return ThreadOffset<pointer_size>(
996         OFFSETOF_MEMBER(Thread, tls32_) +
997         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
998   }
999 
1000   template<PointerSize pointer_size>
1001   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
1002     return ThreadOffset<pointer_size>(
1003         OFFSETOF_MEMBER(Thread, tls32_) +
1004         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
1005   }
1006 
1007   template <PointerSize pointer_size>
1008   static constexpr ThreadOffset<pointer_size> DeoptCheckRequiredOffset() {
1009     return ThreadOffset<pointer_size>(
1010         OFFSETOF_MEMBER(Thread, tls32_) +
1011         OFFSETOF_MEMBER(tls_32bit_sized_values, is_deopt_check_required));
1012   }
1013 
1014   static constexpr size_t IsGcMarkingSize() {
1015     return sizeof(tls32_.is_gc_marking);
1016   }
1017 
1018   template<PointerSize pointer_size>
1019   static constexpr ThreadOffset<pointer_size> SharedMethodHotnessOffset() {
1020     return ThreadOffset<pointer_size>(
1021         OFFSETOF_MEMBER(Thread, tls32_) +
1022         OFFSETOF_MEMBER(tls_32bit_sized_values, shared_method_hotness));
1023   }
1024 
1025   // Deoptimize the Java stack.
1026   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
1027 
1028  private:
1029   template<PointerSize pointer_size>
1030   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
1031     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
1032     size_t scale = (pointer_size > kRuntimePointerSize) ?
1033       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
1034     size_t shrink = (kRuntimePointerSize > pointer_size) ?
1035       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
1036     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
1037   }
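  // Worked example of the scaling above (illustrative): when generating code for a 64-bit target
  // from a 32-bit runtime, pointer_size / kRuntimePointerSize == 2, so the N-th pointer-sized
  // slot of tlsPtr_, at local offset N * 4, maps to base + N * 8 in the target layout; in the
  // opposite direction the offset is divided ("shrink") by 2 instead.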
1038 
1039  public:
1040   template<PointerSize pointer_size>
1041   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
1042       size_t quick_entrypoint_offset) {
1043     return ThreadOffsetFromTlsPtr<pointer_size>(
1044         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
1045   }
1046 
1047   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
1048                                                           PointerSize pointer_size) {
1049     if (pointer_size == PointerSize::k32) {
1050       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
1051           Uint32Value();
1052     } else {
1053       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
1054           Uint32Value();
1055     }
1056   }
1057 
1058   template<PointerSize pointer_size>
1059   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
1060     return ThreadOffsetFromTlsPtr<pointer_size>(
1061         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
1062   }
1063 
1064   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
1065   template <PointerSize pointer_size>
1066   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
1067     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
1068     DCHECK_LT(reg, 30u);
1069     // The ReadBarrierMarkRegX entry points are ordered by increasing
1070     // register number in Thread::tlsPtr_.quick_entrypoints.
1071     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
1072         + static_cast<size_t>(pointer_size) * reg;
1073   }
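  // Worked example (illustrative): with pointer_size == PointerSize::k64 and reg == 3, the result
  // is the offset of pReadBarrierMarkReg00 plus 3 * 8 bytes, i.e. the slot holding
  // pReadBarrierMarkReg03, since the entry points are laid out consecutively by register number.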
1074 
1075   template<PointerSize pointer_size>
1076   static constexpr ThreadOffset<pointer_size> SelfOffset() {
1077     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
1078   }
1079 
1080   template<PointerSize pointer_size>
1081   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
1082     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
1083   }
1084 
1085   template<PointerSize pointer_size>
1086   static constexpr ThreadOffset<pointer_size> PeerOffset() {
1087     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
1088   }
1089 
1090 
1091   template<PointerSize pointer_size>
1092   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
1093     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
1094   }
1095 
1096   template<PointerSize pointer_size>
1097   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
1098     return ThreadOffsetFromTlsPtr<pointer_size>(
1099         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
1100   }
1101 
1102   template<PointerSize pointer_size>
1103   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
1104     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1105                                                                 thread_local_pos));
1106   }
1107 
1108   template<PointerSize pointer_size>
1109   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
1110     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1111                                                                 thread_local_end));
1112   }
1113 
1114   template<PointerSize pointer_size>
1115   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
1116     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1117                                                                 thread_local_objects));
1118   }
1119 
1120   template<PointerSize pointer_size>
1121   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
1122     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1123                                                                 rosalloc_runs));
1124   }
1125 
1126   template<PointerSize pointer_size>
1127   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
1128     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1129                                                                 thread_local_alloc_stack_top));
1130   }
1131 
1132   template<PointerSize pointer_size>
1133   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
1134     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1135                                                                 thread_local_alloc_stack_end));
1136   }
1137 
1138   template <PointerSize pointer_size>
1139   static constexpr ThreadOffset<pointer_size> TraceBufferCurrPtrOffset() {
1140     return ThreadOffsetFromTlsPtr<pointer_size>(
1141         OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer_curr_entry));
1142   }
1143 
1144   template <PointerSize pointer_size>
1145   static constexpr ThreadOffset<pointer_size> TraceBufferPtrOffset() {
1146     return ThreadOffsetFromTlsPtr<pointer_size>(
1147         OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer));
1148   }
1149 
1150   // Size of stack less any space reserved for stack overflow
1151   template <StackType stack_type>
1152   size_t GetUsableStackSize() const {
1153     return GetStackSize<stack_type>() - static_cast<size_t>(
1154         GetStackEnd<stack_type>() - GetStackBegin<stack_type>());
1155   }
1156 
1157   template <StackType stack_type>
1158   ALWAYS_INLINE uint8_t* GetStackEnd() const;
1159 
1160   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
1161 
1162   // Set the stack end to that to be used during a stack overflow
1163   template <StackType stack_type>
1164   ALWAYS_INLINE void SetStackEndForStackOverflow()
1165       REQUIRES_SHARED(Locks::mutator_lock_);
1166 
1167   // Set the stack end to that to be used during regular execution
1168   template <StackType stack_type>
1169   ALWAYS_INLINE void ResetDefaultStackEnd();
1170 
1171   template <StackType stack_type>
1172   bool IsHandlingStackOverflow() const {
1173     return GetStackEnd<stack_type>() == GetStackBegin<stack_type>();
1174   }
1175 
1176   template<PointerSize pointer_size>
1177   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
1178     return ThreadOffsetFromTlsPtr<pointer_size>(
1179         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
1180   }
1181 
1182   template<PointerSize pointer_size>
JniEnvOffset()1183   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
1184     return ThreadOffsetFromTlsPtr<pointer_size>(
1185         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
1186   }
1187 
1188   template<PointerSize pointer_size>
TopOfManagedStackOffset()1189   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
1190     return ThreadOffsetFromTlsPtr<pointer_size>(
1191         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
1192         ManagedStack::TaggedTopQuickFrameOffset());
1193   }
1194 
GetManagedStack()1195   const ManagedStack* GetManagedStack() const {
1196     return &tlsPtr_.managed_stack;
1197   }
1198 
1199   // Linked list recording fragments of managed stack.
PushManagedStackFragment(ManagedStack * fragment)1200   void PushManagedStackFragment(ManagedStack* fragment) {
1201     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
1202   }
PopManagedStackFragment(const ManagedStack & fragment)1203   void PopManagedStackFragment(const ManagedStack& fragment) {
1204     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
1205   }
1206 
1207   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
1208   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
1209 
1210   template<PointerSize pointer_size>
TopShadowFrameOffset()1211   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
1212     return ThreadOffsetFromTlsPtr<pointer_size>(
1213         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
1214         ManagedStack::TopShadowFrameOffset());
1215   }
1216 
1217   // Is the given object on the quick stack?
1218   bool IsRawObjOnQuickStack(uint8_t* raw_obj) const;
1219 
1220   // Is the given obj in one of this thread's JNI transition frames?
1221   bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
1222 
1223   // Convert a global (or weak global) jobject into an Object*.
1224   ObjPtr<mirror::Object> DecodeGlobalJObject(jobject obj) const
1225       REQUIRES_SHARED(Locks::mutator_lock_);
1226 
1227   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
1228       REQUIRES_SHARED(Locks::mutator_lock_);
1229 
GetTopHandleScope()1230   BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1231     return tlsPtr_.top_handle_scope;
1232   }
1233 
PushHandleScope(BaseHandleScope * handle_scope)1234   void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
1235     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
1236     tlsPtr_.top_handle_scope = handle_scope;
1237   }
1238 
PopHandleScope()1239   BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1240     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
1241     DCHECK(handle_scope != nullptr);
1242     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
1243     return handle_scope;
1244   }
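
  // Illustrative sketch (not runtime code): handle scopes form a LIFO chain via GetLink(), so a
  // caller is expected to pop exactly the scope it pushed.
  //
  //   self->PushHandleScope(scope);
  //   ...  // Use handles in 'scope'.
  //   BaseHandleScope* popped = self->PopHandleScope();
  //   DCHECK_EQ(popped, scope);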
1245 
1246   template<PointerSize pointer_size>
TopHandleScopeOffset()1247   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
1248     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1249                                                                 top_handle_scope));
1250   }
1251 
1252   template<PointerSize pointer_size>
MutatorLockOffset()1253   static constexpr ThreadOffset<pointer_size> MutatorLockOffset() {
1254     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1255                                                                 mutator_lock));
1256   }
1257 
1258   template<PointerSize pointer_size>
HeldMutexOffset(LockLevel level)1259   static constexpr ThreadOffset<pointer_size> HeldMutexOffset(LockLevel level) {
1260     DCHECK_LT(enum_cast<size_t>(level), arraysize(tlsPtr_.held_mutexes));
1261     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1262                                                                 held_mutexes[level]));
1263   }
1264 
GetTopReflectiveHandleScope()1265   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
1266     return tlsPtr_.top_reflective_handle_scope;
1267   }
1268 
PushReflectiveHandleScope(BaseReflectiveHandleScope * scope)1269   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
1270     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
1271     DCHECK_EQ(scope->GetThread(), this);
1272     tlsPtr_.top_reflective_handle_scope = scope;
1273   }
1274 
PopReflectiveHandleScope()1275   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
1276     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
1277     DCHECK(handle_scope != nullptr);
1278     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
1279     return handle_scope;
1280   }
1281 
GetIsGcMarking()1282   bool GetIsGcMarking() const {
1283     DCHECK(gUseReadBarrier);
1284     return tls32_.is_gc_marking;
1285   }
1286 
1287   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
1288 
IsDeoptCheckRequired()1289   bool IsDeoptCheckRequired() const { return tls32_.is_deopt_check_required; }
1290 
SetDeoptCheckRequired(bool flag)1291   void SetDeoptCheckRequired(bool flag) { tls32_.is_deopt_check_required = flag; }
1292 
1293   bool GetWeakRefAccessEnabled() const;  // Only safe for current thread.
1294 
SetWeakRefAccessEnabled(bool enabled)1295   void SetWeakRefAccessEnabled(bool enabled) {
1296     DCHECK(gUseReadBarrier);
1297     WeakRefAccessState new_state = enabled ?
1298         WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
1299     tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
1300   }
1301 
GetDisableThreadFlipCount()1302   uint32_t GetDisableThreadFlipCount() const {
1303     return tls32_.disable_thread_flip_count;
1304   }
1305 
IncrementDisableThreadFlipCount()1306   void IncrementDisableThreadFlipCount() {
1307     ++tls32_.disable_thread_flip_count;
1308   }
1309 
DecrementDisableThreadFlipCount()1310   void DecrementDisableThreadFlipCount() {
1311     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
1312     --tls32_.disable_thread_flip_count;
1313   }
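
  // Illustrative pairing (sketch only): a JNI critical region bumps the counter on entry and
  // drops it on exit, so a pending thread flip waits until all nested regions are exited.
  //
  //   self->IncrementDisableThreadFlipCount();
  //   ...  // Access array/string data without allowing a thread flip.
  //   self->DecrementDisableThreadFlipCount();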
1314 
1315   // Returns true if the thread is a runtime thread (e.g. from a ThreadPool).
IsRuntimeThread()1316   bool IsRuntimeThread() const {
1317     return is_runtime_thread_;
1318   }
1319 
SetIsRuntimeThread(bool is_runtime_thread)1320   void SetIsRuntimeThread(bool is_runtime_thread) {
1321     is_runtime_thread_ = is_runtime_thread;
1322   }
1323 
CorePlatformApiCookie()1324   uint32_t CorePlatformApiCookie() {
1325     return core_platform_api_cookie_;
1326   }
1327 
SetCorePlatformApiCookie(uint32_t cookie)1328   void SetCorePlatformApiCookie(uint32_t cookie) {
1329     core_platform_api_cookie_ = cookie;
1330   }
1331 
1332   // Returns true if the thread is allowed to load java classes.
1333   bool CanLoadClasses() const;
1334 
1335   // Returns the fake exception used to activate deoptimization.
GetDeoptimizationException()1336   static mirror::Throwable* GetDeoptimizationException() {
1337     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1338     // represented by ObjPtr.
1339     return reinterpret_cast<mirror::Throwable*>(0x100);
1340   }
1341 
1342   // Currently deoptimization invokes the verifier, which can trigger class loading
1343   // and execute Java code, so nested deoptimizations may happen.
1344   // We need to save the ongoing deoptimization shadow frames and return
1345   // values on stacks.
1346   // 'from_code' denotes whether the deoptimization was explicitly made from
1347   // compiled code.
1348   // 'method_type' contains info on whether deoptimization should advance
1349   // dex_pc.
1350   void PushDeoptimizationContext(const JValue& return_value,
1351                                  bool is_reference,
1352                                  ObjPtr<mirror::Throwable> exception,
1353                                  bool from_code,
1354                                  DeoptimizationMethodType method_type)
1355       REQUIRES_SHARED(Locks::mutator_lock_);
1356   void PopDeoptimizationContext(JValue* result,
1357                                 ObjPtr<mirror::Throwable>* exception,
1358                                 bool* from_code,
1359                                 DeoptimizationMethodType* method_type)
1360       REQUIRES_SHARED(Locks::mutator_lock_);
1361   void AssertHasDeoptimizationContext()
1362       REQUIRES_SHARED(Locks::mutator_lock_);
1363   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1364   ShadowFrame* PopStackedShadowFrame();
1365   ShadowFrame* MaybePopDeoptimizedStackedShadowFrame();
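
  // Illustrative sketch of the intended push/pop pairing (not actual runtime code; assumes a
  // DeoptimizationMethodType enumerator named kDefault):
  //
  //   self->PushDeoptimizationContext(return_value, /*is_reference=*/false, exception,
  //                                   /*from_code=*/true, DeoptimizationMethodType::kDefault);
  //   ...  // Nested deoptimizations may push and pop further contexts.
  //   JValue result;
  //   ObjPtr<mirror::Throwable> pending;
  //   bool from_code;
  //   DeoptimizationMethodType type;
  //   self->PopDeoptimizationContext(&result, &pending, &from_code, &type);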
1366 
1367   // For debugger, find the shadow frame that corresponds to a frame id,
1368   // or return null if there is none.
1369   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1370       REQUIRES_SHARED(Locks::mutator_lock_);
1371   // For debugger, find the bool array that keeps track of the updated vreg set
1372   // for a frame id.
1373   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1374   // For debugger, find the shadow frame that corresponds to a frame id. If
1375   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1376   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1377                                                uint32_t num_vregs,
1378                                                ArtMethod* method,
1379                                                uint32_t dex_pc)
1380       REQUIRES_SHARED(Locks::mutator_lock_);
1381 
1382   // Delete the entry that maps from frame_id to shadow_frame.
1383   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1384       REQUIRES_SHARED(Locks::mutator_lock_);
1385 
GetStackTraceSample()1386   std::vector<ArtMethod*>* GetStackTraceSample() const {
1387     DCHECK(!IsAotCompiler());
1388     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1389   }
1390 
SetStackTraceSample(std::vector<ArtMethod * > * sample)1391   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1392     DCHECK(!IsAotCompiler());
1393     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1394   }
1395 
GetVerifierDeps()1396   verifier::VerifierDeps* GetVerifierDeps() const {
1397     DCHECK(IsAotCompiler());
1398     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1399   }
1400 
1401   // It is the responsibility of the caller to make sure the verifier_deps
1402   // entry in the thread is cleared before destruction of the actual VerifierDeps
1403   // object, or the thread.
SetVerifierDeps(verifier::VerifierDeps * verifier_deps)1404   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1405     DCHECK(IsAotCompiler());
1406     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1407     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1408   }
1409 
GetMethodTraceBuffer()1410   uintptr_t* GetMethodTraceBuffer() { return tlsPtr_.method_trace_buffer; }
1411 
GetTraceBufferCurrEntryPtr()1412   uintptr_t** GetTraceBufferCurrEntryPtr() { return &tlsPtr_.method_trace_buffer_curr_entry; }
1413 
SetMethodTraceBuffer(uintptr_t * buffer,int init_index)1414   void SetMethodTraceBuffer(uintptr_t* buffer, int init_index) {
1415     tlsPtr_.method_trace_buffer = buffer;
1416     SetMethodTraceBufferCurrentEntry(init_index);
1417   }
1418 
SetMethodTraceBufferCurrentEntry(int index)1419   void SetMethodTraceBufferCurrentEntry(int index) {
1420     uintptr_t* buffer = tlsPtr_.method_trace_buffer;
1421     if (buffer == nullptr) {
1422       tlsPtr_.method_trace_buffer_curr_entry = nullptr;
1423     } else {
1424       DCHECK(buffer != nullptr);
1425       tlsPtr_.method_trace_buffer_curr_entry = buffer + index;
1426     }
1427   }
1428 
1429   void UpdateTlsLowOverheadTraceEntrypoints(bool enable);
1430 
GetTraceClockBase()1431   uint64_t GetTraceClockBase() const {
1432     return tls64_.trace_clock_base;
1433   }
1434 
SetTraceClockBase(uint64_t clock_base)1435   void SetTraceClockBase(uint64_t clock_base) {
1436     tls64_.trace_clock_base = clock_base;
1437   }
1438 
GetHeldMutex(LockLevel level)1439   BaseMutex* GetHeldMutex(LockLevel level) const {
1440     return tlsPtr_.held_mutexes[level];
1441   }
1442 
SetHeldMutex(LockLevel level,BaseMutex * mutex)1443   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1444     tlsPtr_.held_mutexes[level] = mutex;
1445   }
1446 
1447   // Possibly check that no mutexes at level kMonitorLock or above are subsequently acquired.
1448   // Only invoked by the thread itself.
1449   void DisallowPreMonitorMutexes();
1450 
1451   // Undo the effect of the previous call. Again only invoked by the thread itself.
1452   void AllowPreMonitorMutexes();
1453 
ReadFlag(ThreadFlag flag)1454   bool ReadFlag(ThreadFlag flag) const {
1455     return GetStateAndFlags(std::memory_order_relaxed).IsFlagSet(flag);
1456   }
1457 
1458   void AtomicSetFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1459     // Since we discard the returned value, memory_order_release will often suffice.
1460     tls32_.state_and_flags.fetch_or(enum_cast<uint32_t>(flag), order);
1461   }
1462 
1463   void AtomicClearFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1464     // Since we discard the returned value, memory_order_release will often suffice.
1465     tls32_.state_and_flags.fetch_and(~enum_cast<uint32_t>(flag), order);
1466   }
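
  // Illustrative flag round-trip (sketch only; the specific flag is just an example):
  //
  //   self->AtomicSetFlag(ThreadFlag::kCheckpointRequest, std::memory_order_release);
  //   if (self->ReadFlag(ThreadFlag::kCheckpointRequest)) {
  //     ...  // Handle the request.
  //     self->AtomicClearFlag(ThreadFlag::kCheckpointRequest, std::memory_order_relaxed);
  //   }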
1467 
1468   void ResetQuickAllocEntryPointsForThread();
1469 
1470   // Returns the remaining space in the TLAB.
TlabSize()1471   size_t TlabSize() const {
1472     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1473   }
1474 
1475   // Returns pos offset from start.
GetTlabPosOffset()1476   size_t GetTlabPosOffset() const {
1477     return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
1478   }
1479 
1480   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
TlabRemainingCapacity()1481   size_t TlabRemainingCapacity() const {
1482     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1483   }
1484 
1485   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
ExpandTlab(size_t bytes)1486   void ExpandTlab(size_t bytes) {
1487     tlsPtr_.thread_local_end += bytes;
1488     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1489   }
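
  // Illustrative TLAB fast path (sketch only, ignoring alignment and allocator policy): try the
  // current buffer first, then grow it towards thread_local_limit if possible.
  //
  //   if (alloc_size <= self->TlabSize()) {
  //     obj = self->AllocTlab(alloc_size);  // Bump-pointer allocation; no capacity check.
  //   } else if (alloc_size <= self->TlabRemainingCapacity()) {
  //     self->ExpandTlab(alloc_size - self->TlabSize());
  //     obj = self->AllocTlab(alloc_size);
  //   } else {
  //     ...  // Fall back to a new TLAB or a slow-path allocation.
  //   }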
1490 
1491   // Called from Concurrent mark-compact GC to slide the TLAB pointers backwards
1492   // to adjust to post-compact addresses.
1493   void AdjustTlab(size_t slide_bytes);
1494 
1495   // Doesn't check that there is room.
1496   mirror::Object* AllocTlab(size_t bytes);
1497   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1498   bool HasTlab() const;
1499   void ResetTlab();
GetTlabStart()1500   uint8_t* GetTlabStart() {
1501     return tlsPtr_.thread_local_start;
1502   }
GetTlabPos()1503   uint8_t* GetTlabPos() {
1504     return tlsPtr_.thread_local_pos;
1505   }
GetTlabEnd()1506   uint8_t* GetTlabEnd() {
1507     return tlsPtr_.thread_local_end;
1508   }
1509   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1510   // equal to a valid pointer.
RemoveSuspendTrigger()1511   void RemoveSuspendTrigger() {
1512     tlsPtr_.suspend_trigger.store(reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger),
1513                                   std::memory_order_relaxed);
1514   }
1515 
1516   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1517   // The next time a suspend check is done, it will load from the value at this address
1518   // and trigger a SIGSEGV.
1519   // Only needed if Runtime::implicit_suspend_checks_ is true. On some platforms, and in the
1520   // interpreter, client code currently just looks at the thread flags directly to determine
1521   // whether we should suspend, so this call is not always necessary.
TriggerSuspend()1522   void TriggerSuspend() { tlsPtr_.suspend_trigger.store(nullptr, std::memory_order_release); }
1523 
1524   // Push an object onto the allocation stack.
1525   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1526       REQUIRES_SHARED(Locks::mutator_lock_);
1527 
1528   // Set the thread local allocation pointers to the given pointers.
1529   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1530                                      StackReference<mirror::Object>* end);
1531 
1532   // Resets the thread local allocation pointers.
1533   void RevokeThreadLocalAllocationStack();
1534 
GetThreadLocalBytesAllocated()1535   size_t GetThreadLocalBytesAllocated() const {
1536     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1537   }
1538 
GetThreadLocalObjectsAllocated()1539   size_t GetThreadLocalObjectsAllocated() const {
1540     return tlsPtr_.thread_local_objects;
1541   }
1542 
GetRosAllocRun(size_t index)1543   void* GetRosAllocRun(size_t index) const {
1544     return tlsPtr_.rosalloc_runs[index];
1545   }
1546 
SetRosAllocRun(size_t index,void * run)1547   void SetRosAllocRun(size_t index, void* run) {
1548     tlsPtr_.rosalloc_runs[index] = run;
1549   }
1550 
1551   template <StackType stack_type>
1552   bool ProtectStack(bool fatal_on_error = true);
1553   template <StackType stack_type>
1554   bool UnprotectStack();
1555 
DecrementForceInterpreterCount()1556   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1557     return --tls32_.force_interpreter_count;
1558   }
1559 
IncrementForceInterpreterCount()1560   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1561     return ++tls32_.force_interpreter_count;
1562   }
1563 
SetForceInterpreterCount(uint32_t value)1564   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1565     tls32_.force_interpreter_count = value;
1566   }
1567 
ForceInterpreterCount()1568   uint32_t ForceInterpreterCount() const {
1569     return tls32_.force_interpreter_count;
1570   }
1571 
IsForceInterpreter()1572   bool IsForceInterpreter() const {
1573     return tls32_.force_interpreter_count != 0;
1574   }
1575 
IncrementMakeVisiblyInitializedCounter()1576   bool IncrementMakeVisiblyInitializedCounter() {
1577     tls32_.make_visibly_initialized_counter += 1u;
1578     DCHECK_LE(tls32_.make_visibly_initialized_counter, kMakeVisiblyInitializedCounterTriggerCount);
1579     if (tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount) {
1580       tls32_.make_visibly_initialized_counter = 0u;
1581       return true;
1582     }
1583     return false;
1584   }
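
  // Illustrative use of the trigger (sketch only): callers batch the expensive "make classes
  // visibly initialized" work until the counter reaches its trigger count.
  //
  //   if (self->IncrementMakeVisiblyInitializedCounter()) {
  //     ...  // Ask the runtime to make initialized classes visibly initialized.
  //   }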
1585 
1586   void InitStringEntryPoints();
1587 
ModifyDebugDisallowReadBarrier(int8_t delta)1588   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1589     if (kCheckDebugDisallowReadBarrierCount) {
1590       debug_disallow_read_barrier_ += delta;
1591     }
1592   }
1593 
GetDebugDisallowReadBarrierCount()1594   uint8_t GetDebugDisallowReadBarrierCount() const {
1595     return kCheckDebugDisallowReadBarrierCount ? debug_disallow_read_barrier_ : 0u;
1596   }
1597 
1598   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1599   // do not gain ownership of TLSData and must synchronize with SetCustomTls themselves to prevent
1600   // it from being deleted.
1601   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1602 
1603   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1604   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1605   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
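
  // Illustrative custom-TLS usage (sketch only; MyTlsData is a hypothetical TLSData subclass):
  //
  //   class MyTlsData : public TLSData {
  //    public:
  //     ~MyTlsData() override { /* Runs at thread exit or when the key is overwritten. */ }
  //   };
  //   self->SetCustomTLS("my-key", new MyTlsData());  // The thread takes ownership.
  //   TLSData* data = self->GetCustomTLS("my-key");   // nullptr if never set.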
1606 
1607   // Returns true if the current thread is the jit sensitive thread.
IsJitSensitiveThread()1608   bool IsJitSensitiveThread() const {
1609     return this == jit_sensitive_thread_;
1610   }
1611 
1612   bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1613 
1614   // Cause the 'this' thread to abort the process by sending SIGABRT.  Thus we should get an
1615   // asynchronous stack trace for 'this' thread, rather than waiting for it to process a
1616   // checkpoint. Useful mostly to discover why a thread isn't responding to a suspend request or
1617   // checkpoint. The caller should "suspend" (in the Java sense) 'thread' before invoking this, so
1618   // 'thread' can't get deallocated before we access it.
1619   NO_RETURN void AbortInThis(const std::string& message);
1620 
1621   // Returns true if StrictMode events are traced for the current thread.
IsSensitiveThread()1622   static bool IsSensitiveThread() {
1623     if (is_sensitive_thread_hook_ != nullptr) {
1624       return (*is_sensitive_thread_hook_)();
1625     }
1626     return false;
1627   }
1628 
1629   // Set to the read barrier marking entrypoints to be non-null.
1630   void SetReadBarrierEntrypoints();
1631 
1632   ObjPtr<mirror::Object> CreateCompileTimePeer(const char* name,
1633                                                bool as_daemon,
1634                                                jobject thread_group)
1635       REQUIRES_SHARED(Locks::mutator_lock_);
1636 
GetInterpreterCache()1637   ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1638     return &interpreter_cache_;
1639   }
1640 
1641   // Clear all thread-local interpreter caches.
1642   //
1643   // Since the caches are keyed by memory pointer to dex instructions, this must be
1644   // called when any dex code is unloaded (before different code gets loaded at the
1645   // same memory location).
1646   //
1647   // If presence of cache entry implies some pre-conditions, this must also be
1648   // called if the pre-conditions might no longer hold true.
1649   static void ClearAllInterpreterCaches();
1650 
1651   template<PointerSize pointer_size>
InterpreterCacheOffset()1652   static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1653     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1654   }
1655 
InterpreterCacheSizeLog2()1656   static constexpr int InterpreterCacheSizeLog2() {
1657     return WhichPowerOf2(InterpreterCache::kSize);
1658   }
1659 
AllThreadFlags()1660   static constexpr uint32_t AllThreadFlags() {
1661     return enum_cast<uint32_t>(ThreadFlag::kLastFlag) |
1662            (enum_cast<uint32_t>(ThreadFlag::kLastFlag) - 1u);
1663   }
1664 
SuspendOrCheckpointRequestFlags()1665   static constexpr uint32_t SuspendOrCheckpointRequestFlags() {
1666     return enum_cast<uint32_t>(ThreadFlag::kSuspendRequest) |
1667            enum_cast<uint32_t>(ThreadFlag::kCheckpointRequest) |
1668            enum_cast<uint32_t>(ThreadFlag::kEmptyCheckpointRequest);
1669   }
1670 
FlipFunctionFlags()1671   static constexpr uint32_t FlipFunctionFlags() {
1672     return enum_cast<uint32_t>(ThreadFlag::kPendingFlipFunction) |
1673            enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction);
1674   }
1675 
StoredThreadStateValue(ThreadState state)1676   static constexpr uint32_t StoredThreadStateValue(ThreadState state) {
1677     return StateAndFlags::EncodeState(state);
1678   }
1679 
ResetSharedMethodHotness()1680   void ResetSharedMethodHotness() {
1681     tls32_.shared_method_hotness = kSharedMethodHotnessThreshold;
1682   }
1683 
GetSharedMethodHotness()1684   uint32_t GetSharedMethodHotness() const {
1685     return tls32_.shared_method_hotness;
1686   }
1687 
DecrementSharedMethodHotness()1688   uint32_t DecrementSharedMethodHotness() {
1689     tls32_.shared_method_hotness = (tls32_.shared_method_hotness - 1) & 0xffff;
1690     return tls32_.shared_method_hotness;
1691   }
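
  // Illustrative interpreter-side use (sketch only): decrement per sample and call into the
  // runtime when the counter hits zero, then re-arm it.
  //
  //   if (self->DecrementSharedMethodHotness() == 0) {
  //     ...  // Notify the JIT about the hot shared-memory method.
  //     self->ResetSharedMethodHotness();
  //   }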
1692 
1693  private:
1694   // We pretend to acquire this while running a checkpoint to detect lock ordering issues.
1695   // Initialized lazily.
1696   static std::atomic<Mutex*> cp_placeholder_mutex_;
1697 
1698   explicit Thread(bool daemon);
1699 
1700   // A successfully started thread is only deleted by the thread itself.
1701   // Threads are deleted after they have been removed from the thread list while holding
1702   // suspend_count_lock_ and thread_list_lock_. We refuse to do this while either kSuspendRequest
1703   // or kRunningFlipFunction are set. We can prevent Thread destruction by holding either of those
1704   // locks, ensuring that either of those flags are set, or possibly by registering and checking a
1705   // ThreadExitFlag.
1706   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1707 
1708   // Thread destruction actions that do not invalidate the thread. Checkpoints and flip_functions
1709   // may still be called on this Thread object, though not by this thread, during and after the
1710   // Destroy() call.
1711   void Destroy(bool should_run_callbacks);
1712 
1713   // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
1714   // observed to be set at the same time by instrumentation.
1715   void DeleteJPeer(JNIEnv* env);
1716 
1717   // Attaches the calling native thread to the runtime, returning the new native peer.
1718   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1719   template <typename PeerAction>
1720   static Thread* Attach(const char* thread_name,
1721                         bool as_daemon,
1722                         PeerAction p,
1723                         bool should_run_callbacks);
1724 
1725   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1726 
1727   template<bool kTransactionActive>
1728   static void InitPeer(ObjPtr<mirror::Object> peer,
1729                        bool as_daemon,
1730                        ObjPtr<mirror::Object> thread_group,
1731                        ObjPtr<mirror::String> thread_name,
1732                        jint thread_priority)
1733       REQUIRES_SHARED(Locks::mutator_lock_);
1734 
1735   // Avoid use, callers should use SetState.
1736   // Used only by `Thread` destructor and stack trace collection in semi-space GC (currently
1737   // disabled by `kStoreStackTraces = false`). May not be called on a runnable thread other
1738   // than Thread::Current().
1739   // NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in
1740   // different states, so clang cannot perform the thread safety analysis.
SetStateUnsafe(ThreadState new_state)1741   ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS {
1742     StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1743     ThreadState old_state = old_state_and_flags.GetState();
1744     if (old_state == new_state) {
1745       // Nothing to do.
1746     } else if (old_state == ThreadState::kRunnable) {
1747       DCHECK_EQ(this, Thread::Current());
1748       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1749       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1750       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1751       TransitionToSuspendedAndRunCheckpoints(new_state);
1752       // Since we transitioned to a suspended state, check for active suspend barriers to pass.
1753       CheckActiveSuspendBarriers();
1754     } else {
1755       while (true) {
1756         StateAndFlags new_state_and_flags = old_state_and_flags;
1757         new_state_and_flags.SetState(new_state);
1758         if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(
1759                 old_state_and_flags.GetValue(), new_state_and_flags.GetValue()))) {
1760           break;
1761         }
1762         // Reload state and flags.
1763         old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1764         DCHECK_EQ(old_state, old_state_and_flags.GetState());
1765       }
1766     }
1767     return old_state;
1768   }
1769 
GetMutatorLock()1770   MutatorMutex* GetMutatorLock() RETURN_CAPABILITY(Locks::mutator_lock_) {
1771     DCHECK_EQ(tlsPtr_.mutator_lock, Locks::mutator_lock_);
1772     return tlsPtr_.mutator_lock;
1773   }
1774 
1775   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1776 
1777   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1778   DumpOrder DumpStack(std::ostream& os,
1779                       bool dump_native_stack = true,
1780                       bool force_dump_stack = false) const
1781       REQUIRES_SHARED(Locks::mutator_lock_);
1782   DumpOrder DumpStack(std::ostream& os,
1783                       unwindstack::AndroidLocalUnwinder& unwinder,
1784                       bool dump_native_stack = true,
1785                       bool force_dump_stack = false) const
1786       REQUIRES_SHARED(Locks::mutator_lock_);
1787 
1788   // Out-of-line conveniences for debugging in gdb.
1789   static Thread* CurrentFromGdb();  // Like Thread::Current.
1790   // Like Thread::Dump(std::cerr).
1791   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1792 
1793   // A wrapper around CreateCallback used when userfaultfd GC is used to
1794   // identify the GC by stacktrace.
1795   static NO_INLINE void* CreateCallbackWithUffdGc(void* arg);
1796   static void* CreateCallback(void* arg);
1797 
1798   void HandleUncaughtExceptions() REQUIRES_SHARED(Locks::mutator_lock_);
1799   void RemoveFromThreadGroup() REQUIRES_SHARED(Locks::mutator_lock_);
1800 
1801   // Initialize a thread.
1802   //
1803   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1804   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1805   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1806   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1807   // of false).
1808   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1809       REQUIRES(Locks::runtime_shutdown_lock_);
1810   void InitCardTable();
1811   void InitCpu();
1812   void CleanupCpu();
1813   void InitTlsEntryPoints();
1814   void InitTid();
1815   void InitPthreadKeySelf();
1816   template <StackType stack_type>
1817   bool InitStack(uint8_t* read_stack_base, size_t read_stack_size, size_t read_guard_size);
1818 
1819   void SetUpAlternateSignalStack();
1820   void TearDownAlternateSignalStack();
1821   void MadviseAwayAlternateSignalStack();
1822 
1823   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1824       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
1825       REQUIRES_SHARED(Locks::mutator_lock_);
1826 
1827   // Call PassActiveSuspendBarriers() if there are active barriers. Only called on current thread.
1828   ALWAYS_INLINE void CheckActiveSuspendBarriers()
1829       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::mutator_lock_, !Roles::uninterruptible_);
1830 
1831   // Decrement all "suspend barriers" for the current thread, notifying threads that requested our
1832   // suspension. Only called on current thread, when suspended. If suspend_count_ > 0 then we
1833   // promise that we are and will remain "suspended" until the suspend count is decremented.
1834   bool PassActiveSuspendBarriers()
1835       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::mutator_lock_);
1836 
1837   // Add an entry to active_suspend1_barriers.
1838   ALWAYS_INLINE void AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1839       REQUIRES(Locks::thread_suspend_count_lock_);
1840 
1841   // Remove last-added entry from active_suspend1_barriers.
1842   // Only makes sense if we're still holding thread_suspend_count_lock_ since insertion.
1843   // We redundantly pass in the barrier to be removed in order to enable a DCHECK.
1844   ALWAYS_INLINE void RemoveFirstSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1845       REQUIRES(Locks::thread_suspend_count_lock_);
1846 
1847   // Remove the "barrier" from the list no matter where it appears. Called only under exceptional
1848   // circumstances. The barrier must be in the list.
1849   ALWAYS_INLINE void RemoveSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1850       REQUIRES(Locks::thread_suspend_count_lock_);
1851 
1852   ALWAYS_INLINE bool HasActiveSuspendBarrier() REQUIRES(Locks::thread_suspend_count_lock_);
1853 
1854   // CHECK that the given barrier is no longer on our list.
1855   ALWAYS_INLINE void CheckBarrierInactive(WrappedSuspend1Barrier* suspend1_barrier)
1856       REQUIRES(Locks::thread_suspend_count_lock_);
1857 
1858   // Registers the current thread as the jit sensitive thread. Should be called just once.
SetJitSensitiveThread()1859   static void SetJitSensitiveThread() {
1860     if (jit_sensitive_thread_ == nullptr) {
1861       jit_sensitive_thread_ = Thread::Current();
1862     } else {
1863       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1864           << Thread::Current()->GetTid();
1865     }
1866   }
1867 
SetSensitiveThreadHook(bool (* is_sensitive_thread_hook)())1868   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1869     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1870   }
1871 
1872   // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1873   // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1874   // the kCheckpointRequest flag is cleared.
1875   void RunCheckpointFunction()
1876       REQUIRES(!Locks::thread_suspend_count_lock_)
1877       REQUIRES_SHARED(Locks::mutator_lock_);
1878   void RunEmptyCheckpoint();
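
  // Illustrative caller contract (sketch only): RunCheckpointFunction() handles one pending
  // checkpoint at a time, so callers loop until the request flag is gone.
  //
  //   while (ReadFlag(ThreadFlag::kCheckpointRequest)) {
  //     RunCheckpointFunction();
  //   }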
1879 
1880   // Return the nearest page-aligned address below the current stack top.
1881   template <StackType>
1882   NO_INLINE uint8_t* FindStackTop();
1883 
1884   // Install the protected region for implicit stack checks.
1885   template <StackType>
1886   void InstallImplicitProtection();
1887 
1888   template <bool kPrecise>
1889   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1890 
1891   static bool IsAotCompiler();
1892 
1893   void SetCachedThreadName(const char* name);
1894 
1895   // Helper functions to get/set the tls stack pointer variables.
1896   template <StackType stack_type>
1897   ALWAYS_INLINE void SetStackEnd(uint8_t* new_stack_end);
1898 
1899   template <StackType stack_type>
1900   ALWAYS_INLINE uint8_t* GetStackBegin() const;
1901 
1902   template <StackType stack_type>
1903   ALWAYS_INLINE void SetStackBegin(uint8_t* new_stack_begin);
1904 
1905   template <StackType stack_type>
1906   ALWAYS_INLINE size_t GetStackSize() const;
1907 
1908   template <StackType stack_type>
1909   ALWAYS_INLINE void SetStackSize(size_t new_stack_size);
1910 
1911   // Helper class for manipulating the 32 bits of atomically changed state and flags.
1912   class StateAndFlags {
1913    public:
StateAndFlags(uint32_t value)1914     explicit StateAndFlags(uint32_t value) :value_(value) {}
1915 
GetValue()1916     uint32_t GetValue() const {
1917       return value_;
1918     }
1919 
SetValue(uint32_t value)1920     void SetValue(uint32_t value) {
1921       value_ = value;
1922     }
1923 
IsAnyOfFlagsSet(uint32_t flags)1924     bool IsAnyOfFlagsSet(uint32_t flags) const {
1925       DCHECK_EQ(flags & ~AllThreadFlags(), 0u);
1926       return (value_ & flags) != 0u;
1927     }
1928 
IsFlagSet(ThreadFlag flag)1929     bool IsFlagSet(ThreadFlag flag) const {
1930       return (value_ & enum_cast<uint32_t>(flag)) != 0u;
1931     }
1932 
SetFlag(ThreadFlag flag)1933     void SetFlag(ThreadFlag flag) {
1934       value_ |= enum_cast<uint32_t>(flag);
1935     }
1936 
WithFlag(ThreadFlag flag)1937     StateAndFlags WithFlag(ThreadFlag flag) const {
1938       StateAndFlags result = *this;
1939       result.SetFlag(flag);
1940       return result;
1941     }
1942 
WithoutFlag(ThreadFlag flag)1943     StateAndFlags WithoutFlag(ThreadFlag flag) const {
1944       StateAndFlags result = *this;
1945       result.ClearFlag(flag);
1946       return result;
1947     }
1948 
ClearFlag(ThreadFlag flag)1949     void ClearFlag(ThreadFlag flag) {
1950       value_ &= ~enum_cast<uint32_t>(flag);
1951     }
1952 
GetState()1953     ThreadState GetState() const {
1954       ThreadState state = ThreadStateField::Decode(value_);
1955       ValidateThreadState(state);
1956       return state;
1957     }
1958 
SetState(ThreadState state)1959     void SetState(ThreadState state) {
1960       ValidateThreadState(state);
1961       value_ = ThreadStateField::Update(state, value_);
1962     }
1963 
WithState(ThreadState state)1964     StateAndFlags WithState(ThreadState state) const {
1965       StateAndFlags result = *this;
1966       result.SetState(state);
1967       return result;
1968     }
1969 
EncodeState(ThreadState state)1970     static constexpr uint32_t EncodeState(ThreadState state) {
1971       ValidateThreadState(state);
1972       return ThreadStateField::Encode(state);
1973     }
1974 
ValidateThreadState(ThreadState state)1975     static constexpr void ValidateThreadState(ThreadState state) {
1976       if (kIsDebugBuild && state != ThreadState::kRunnable) {
1977         CHECK_GE(state, ThreadState::kTerminated);
1978         CHECK_LE(state, ThreadState::kSuspended);
1979         CHECK_NE(state, ThreadState::kObsoleteRunnable);
1980       }
1981     }
1982 
1983     // The value holds thread flags and thread state.
1984     uint32_t value_;
1985 
1986     static constexpr size_t kThreadStateBitSize = BitSizeOf<std::underlying_type_t<ThreadState>>();
1987     static constexpr size_t kThreadStatePosition = BitSizeOf<uint32_t>() - kThreadStateBitSize;
1988     using ThreadStateField = BitField<ThreadState, kThreadStatePosition, kThreadStateBitSize>;
1989     static_assert(
1990         WhichPowerOf2(enum_cast<uint32_t>(ThreadFlag::kLastFlag)) < kThreadStatePosition);
1991   };
1992   static_assert(sizeof(StateAndFlags) == sizeof(uint32_t), "Unexpected StateAndFlags size");
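
  // Illustrative encoding sketch (not runtime code): the top bits hold the ThreadState and the
  // low bits hold ThreadFlag bits, so a suspended thread with a pending checkpoint could be
  // modeled as:
  //
  //   StateAndFlags sf(StateAndFlags::EncodeState(ThreadState::kSuspended));
  //   sf.SetFlag(ThreadFlag::kCheckpointRequest);
  //   DCHECK(sf.IsFlagSet(ThreadFlag::kCheckpointRequest));
  //   DCHECK(sf.GetState() == ThreadState::kSuspended);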
1993 
GetStateAndFlags(std::memory_order order)1994   StateAndFlags GetStateAndFlags(std::memory_order order) const {
1995     return StateAndFlags(tls32_.state_and_flags.load(order));
1996   }
1997 
1998   // Format state and flags as a hex string. For diagnostic output.
1999   std::string StateAndFlagsAsHexString() const;
2000 
2001   // Run the flip function and notify other threads that may have tried
2002   // to do that concurrently.
2003   void RunFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
2004 
2005   // Ensure that the thread flip function for thread `target` has started running. If no other thread is
2006   // executing it, the calling thread shall run the flip function and then notify other threads
2007   // that have tried to do that concurrently. After this function returns, the
2008   // `ThreadFlag::kPendingFlipFunction` is cleared but another thread may still be running the
2009   // flip function as indicated by the `ThreadFlag::kRunningFlipFunction`. Optional arguments:
2010   //  - old_state_and_flags indicates the current state and flags value for the thread, with
2011   //    at least kPendingFlipFunction set. The thread should logically acquire the
2012   //    mutator lock before running the flip function.  A special zero value indicates that the
2013   //    thread already holds the mutator lock, and the actual state_and_flags must be read.
2014   //    A non-zero value implies this == Current().
2015   //  - If tef is non-null, we check that the target thread has not yet exited, as indicated by
2016   //    tef. In that case, we acquire thread_list_lock_ as needed.
2017   //  - If finished is non-null, we assign to *finished to indicate whether the flip was known to
2018   //    be completed when we returned.
2019   //  Returns true if and only if we acquired the mutator lock (which implies that we ran the flip
2020   //  function after finding old_state_and_flags unchanged).
2021   static bool EnsureFlipFunctionStarted(Thread* self,
2022                                         Thread* target,
2023                                         StateAndFlags old_state_and_flags = StateAndFlags(0),
2024                                         ThreadExitFlag* tef = nullptr,
2025                                         /*out*/ bool* finished = nullptr)
2026       REQUIRES(!Locks::thread_list_lock_) TRY_ACQUIRE_SHARED(true, Locks::mutator_lock_);
2027 
2028   static void ThreadExitCallback(void* arg);
2029 
2030   // Maximum number of suspend barriers.
2031   static constexpr uint32_t kMaxSuspendBarriers = 3;
2032 
2033   // Has Thread::Startup been called?
2034   static bool is_started_;
2035 
2036   // TLS key used to retrieve the Thread*.
2037   static pthread_key_t pthread_key_self_;
2038 
2039   // Used to notify threads that they should attempt to resume; they will suspend again if
2040   // their suspend count is > 0.
2041   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
2042 
2043   // Hook passed by framework which returns true
2044   // when StrictMode events are traced for the current thread.
2045   static bool (*is_sensitive_thread_hook_)();
2046   // Stores the jit sensitive thread (which for now is the UI thread).
2047   static Thread* jit_sensitive_thread_;
2048 
2049   static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
2050 
2051   /***********************************************************************************************/
2052   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
2053   // pointer size differences. To encourage shorter encoding, more frequently used values appear
2054   // first if possible.
2055   /***********************************************************************************************/
2056 
2057   struct alignas(4) tls_32bit_sized_values {
2058     // We have no control over the size of 'bool', but want our boolean fields
2059     // to be 4-byte quantities.
2060     using bool32_t = uint32_t;
2061 
tls_32bit_sized_valuestls_32bit_sized_values2062     explicit tls_32bit_sized_values(bool is_daemon)
2063         : state_and_flags(0u),
2064           suspend_count(0),
2065           thin_lock_thread_id(0),
2066           tid(0),
2067           daemon(is_daemon),
2068           throwing_OutOfMemoryError(false),
2069           no_thread_suspension(0),
2070           thread_exit_check_count(0),
2071           is_gc_marking(false),
2072           is_deopt_check_required(false),
2073           weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
2074           disable_thread_flip_count(0),
2075           user_code_suspend_count(0),
2076           force_interpreter_count(0),
2077           make_visibly_initialized_counter(0),
2078           define_class_counter(0),
2079           num_name_readers(0),
2080           shared_method_hotness(kSharedMethodHotnessThreshold) {}
2081 
2082     // The state and flags field must be changed atomically so that flag values aren't lost.
2083     // See `StateAndFlags` for bit assignments of `ThreadFlag` and `ThreadState` values.
2084     // Keeping the state and flags together allows an atomic CAS to change from being
2085     // Suspended to Runnable without a suspend request occurring.
2086     Atomic<uint32_t> state_and_flags;
2087     static_assert(sizeof(state_and_flags) == sizeof(uint32_t),
2088                   "Size of state_and_flags and uint32 are different");
2089 
2090     // A non-zero value is used to tell the current thread to enter a safe point
2091     // at the next poll.
2092     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
2093 
2094     // Thin lock thread id. This is a small integer used by the thin lock implementation.
2095     // This is not to be confused with the native thread's tid, nor is it the value returned
2096     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
2097     // important difference between this id and the ids visible to managed code is that these
2098     // ones get reused (to ensure that they fit in the number of bits available).
2099     uint32_t thin_lock_thread_id;
2100 
2101     // System thread id.
2102     uint32_t tid;
2103 
2104     // Is the thread a daemon?
2105     const bool32_t daemon;
2106 
2107     // A boolean telling us whether we're recursively throwing OOME.
2108     bool32_t throwing_OutOfMemoryError;
2109 
2110     // A positive value implies we're in a region where thread suspension isn't expected.
2111     uint32_t no_thread_suspension;
2112 
2113     // How many times has our pthread key's destructor been called?
2114     uint32_t thread_exit_check_count;
2115 
2116     // True if the GC is in the marking phase. This is used for the CC collector only. This is
2117     // thread local so that we can simplify the logic to check for the fast path of read barriers of
2118     // GC roots.
2119     bool32_t is_gc_marking;
2120 
2121     // True if we need to check for deoptimization when returning from the runtime functions. This
2122     // is required only when a class is redefined to prevent executing code that has field offsets
2123     // embedded. For non-debuggable apps redefinition is not allowed and this flag should always be
2124     // set to false.
2125     bool32_t is_deopt_check_required;
2126 
2127     // Thread "interrupted" status; stays raised until queried or thrown.
2128     Atomic<bool32_t> interrupted;
2129 
2130     AtomicInteger park_state_;
2131 
2132     // Determines whether the thread is allowed to directly access a weak ref
2133     // (Reference::GetReferent() and system weaks) and to potentially mark an object alive/gray.
2134     // This is used for concurrent reference processing of the CC collector only. This is thread
2135     // local so that we can enable/disable weak ref access by using a checkpoint and avoid a race
2136     // around the time weak ref access gets disabled and concurrent reference processing begins
2137     // (if weak ref access is disabled during a pause, this is not an issue.) Other collectors use
2138     // Runtime::DisallowNewSystemWeaks() and ReferenceProcessor::EnableSlowPath().  Can be
2139     // concurrently accessed by GetReferent() and set (by iterating over threads).
2140     // Can be changed from kEnabled to kVisiblyEnabled by readers. No other concurrent access is
2141     // possible when that happens.
2142     mutable std::atomic<WeakRefAccessState> weak_ref_access_enabled;
2143 
2144     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
2145     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
2146     // critical section enter.
2147     uint32_t disable_thread_flip_count;
2148 
2149     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
2150     // suspended by the runtime from those suspended by user code.
2151     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
2152     // told that AssertHeld should be good enough.
2153     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
2154 
2155     // Count of how many times this thread has been forced to interpreter. If this is not 0 the
2156     // thread must remain in interpreted code as much as possible.
2157     uint32_t force_interpreter_count;
2158 
2159     // Counter for calls to initialize a class that's initialized but not visibly initialized.
2160     // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
2161     // make initialized classes visibly initialized. This is needed because we usually make
2162     // classes visibly initialized in batches but we do not want to be stuck with a class
2163     // initialized but not visibly initialized for a long time even if no more classes are
2164     // being initialized anymore.
2165     uint32_t make_visibly_initialized_counter;
2166 
2167     // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
2168     // for threads to be done with class-definition work.
2169     uint32_t define_class_counter;
2170 
2171     // A count of the number of readers of tlsPtr_.name that may still be looking at a string they
2172     // retrieved.
2173     mutable std::atomic<uint32_t> num_name_readers;
2174     static_assert(std::atomic<uint32_t>::is_always_lock_free);
2175 
2176     // Thread-local hotness counter for shared memory methods. Initialized with
2177     // `kSharedMethodHotnessThreshold`. The interpreter decrements it and goes
2178     // into the runtime when hitting zero. Note that all previous decrements
2179     // could have been executed by another method than the one seeing zero.
2180     // There is a second level counter in `Jit::shared_method_counters_` to make
2181     // sure we at least have a few samples before compiling a method.
2182     uint32_t shared_method_hotness;
2183   } tls32_;
2184 
2185   struct alignas(8) tls_64bit_sized_values {
tls_64bit_sized_valuestls_64bit_sized_values2186     tls_64bit_sized_values() : trace_clock_base(0) {
2187     }
2188 
2189     // The clock base used for tracing.
2190     uint64_t trace_clock_base;
2191 
2192     RuntimeStats stats;
2193   } tls64_;
2194 
2195   struct alignas(sizeof(void*)) tls_ptr_sized_values {
tls_ptr_sized_valuestls_ptr_sized_values2196       tls_ptr_sized_values() : card_table(nullptr),
2197                                exception(nullptr),
2198                                stack_end(nullptr),
2199                                managed_stack(),
2200                                suspend_trigger(nullptr),
2201                                jni_env(nullptr),
2202                                tmp_jni_env(nullptr),
2203                                self(nullptr),
2204                                opeer(nullptr),
2205                                jpeer(nullptr),
2206                                stack_begin(nullptr),
2207                                stack_size(0),
2208                                deps_or_stack_trace_sample(),
2209                                wait_next(nullptr),
2210                                monitor_enter_object(nullptr),
2211                                top_handle_scope(nullptr),
2212                                class_loader_override(nullptr),
2213                                stacked_shadow_frame_record(nullptr),
2214                                deoptimization_context_stack(nullptr),
2215                                frame_id_to_shadow_frame(nullptr),
2216                                name(nullptr),
2217                                pthread_self(0),
2218                                active_suspendall_barrier(nullptr),
2219                                active_suspend1_barriers(nullptr),
2220                                thread_local_pos(nullptr),
2221                                thread_local_end(nullptr),
2222                                thread_local_start(nullptr),
2223                                thread_local_limit(nullptr),
2224                                thread_local_objects(0),
2225                                checkpoint_function(nullptr),
2226                                thread_local_alloc_stack_top(nullptr),
2227                                thread_local_alloc_stack_end(nullptr),
2228                                mutator_lock(nullptr),
2229                                flip_function(nullptr),
2230                                thread_local_mark_stack(nullptr),
2231                                async_exception(nullptr),
2232                                top_reflective_handle_scope(nullptr),
2233                                method_trace_buffer(nullptr),
2234                                method_trace_buffer_curr_entry(nullptr),
2235                                thread_exit_flags(nullptr),
2236                                last_no_thread_suspension_cause(nullptr),
2237                                last_no_transaction_checks_cause(nullptr) {
2238       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
2239     }
2240 
2241     // The biased card table, see CardTable for details.
2242     uint8_t* card_table;
2243 
2244     // The pending exception or null.
2245     mirror::Throwable* exception;
2246 
2247     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
2248     // We leave extra space so there's room for the code that throws StackOverflowError.
2249     // Note: do not use directly; use the GetStackEnd/SetStackEnd template functions instead.
2250     uint8_t* stack_end;
2251 
2252     // The top of the managed stack often manipulated directly by compiler generated code.
2253     ManagedStack managed_stack;
2254 
2255     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
2256     // normally set to the address of itself. It should be cleared with release semantics to ensure
2257     // that prior state changes etc. are visible to any thread that faults as a result.
2258     // We assume that the kernel ensures that such changes are then visible to the faulting
2259     // thread, even if it is not an acquire load that faults. (Indeed, it seems unlikely that the
2260     // ordering semantics associated with the faulting load has any impact.)
2261     std::atomic<uintptr_t*> suspend_trigger;
2262 
2263     // Every thread may have an associated JNI environment
2264     JNIEnvExt* jni_env;
2265 
2266     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
2267     // created thread.
2268     JNIEnvExt* tmp_jni_env;
2269 
2270     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
2271     // is easy but getting the address of Thread::Current is hard. This field can be read off of
2272     // Thread::Current to give the address.
2273     Thread* self;
2274 
2275     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
2276     // start up, until the thread is registered and the local opeer_ is used.
2277     mirror::Object* opeer;
2278     jobject jpeer;
2279 
2280     // The "lowest addressable byte" of the stack.
2281     // Note: do not use directly; use the GetStackBegin/SetStackBegin template functions instead.
2282     uint8_t* stack_begin;
2283 
2284     // Size of the stack.
2285     // Note: do not use directly; use the GetStackSize/SetStackSize template functions instead.
2286     size_t stack_size;
2287 
2288     // Sampling profiler and AOT verification cannot happen on the same run, so we share
2289     // the same entry for the stack trace and the verifier deps.
2290     union DepsOrStackTraceSample {
DepsOrStackTraceSample()2291       DepsOrStackTraceSample() {
2292         verifier_deps = nullptr;
2293         stack_trace_sample = nullptr;
2294       }
2295       // Pointer to previous stack trace captured by sampling profiler.
2296       std::vector<ArtMethod*>* stack_trace_sample;
2297       // When doing AOT verification, per-thread VerifierDeps.
2298       verifier::VerifierDeps* verifier_deps;
2299     } deps_or_stack_trace_sample;
2300 
2301     // The next thread in the wait set this thread is part of or null if not waiting.
2302     Thread* wait_next;
2303 
2304     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
2305     mirror::Object* monitor_enter_object;
2306 
2307     // Top of linked list of handle scopes or null for none.
2308     BaseHandleScope* top_handle_scope;
2309 
2310     // Needed to get the right ClassLoader in JNI_OnLoad, but also
2311     // useful for testing.
2312     jobject class_loader_override;
2313 
2314     // For gc purpose, a shadow frame record stack that keeps track of:
2315     // 1) shadow frames under construction.
2316     // 2) deoptimization shadow frames.
2317     StackedShadowFrameRecord* stacked_shadow_frame_record;
2318 
2319     // Deoptimization return value record stack.
2320     DeoptimizationContextRecord* deoptimization_context_stack;
2321 
2322     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
2323     // Shadow frames may be created before deoptimization happens so that the debugger can
2324     // set local values there first.
2325     FrameIdToShadowFrame* frame_id_to_shadow_frame;
2326 
2327     // A cached copy of the java.lang.Thread's (modified UTF-8) name.
2328     // If this is not null or kThreadNameDuringStartup, then it owns the malloc memory holding
2329     // the string. Updated in an RCU-like manner.
2330     std::atomic<const char*> name;
2331     static_assert(std::atomic<const char*>::is_always_lock_free);
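    // Illustrative sketch (not part of this header): an RCU-like update of `name` swaps the
    // pointer atomically and defers freeing the old string until no reader can still hold it.
    // A minimal sketch, assuming the old value was malloc-allocated (i.e. not null and not
    // kThreadNameDuringStartup) and that a quiescent point has been reached before free():
    //
    //   const char* old_name = name.exchange(strdup(new_name), std::memory_order_release);
    //   // ... wait until concurrent readers of the old pointer are done ...
    //   free(const_cast<char*>(old_name));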
2332 
2333     // A cached pthread_t for the pthread underlying this Thread*.
2334     pthread_t pthread_self;
2335 
2336     // After a thread observes a suspend request and enters a suspended state,
2337     // it notifies the requestor by arriving at a "suspend barrier". This consists of decrementing
2338     // the atomic integer representing the barrier. (This implementation was introduced in 2015 to
2339     // minimize cost. There may be other options.) These atomic integer barriers are always
2340     // stored on the requesting thread's stack. They are referenced from the target thread's
2341     // data structure in one of two ways; in either case the data structure referring to these
2342     // barriers is guarded by suspend_count_lock:
2343     // 1. A SuspendAll barrier is directly referenced from the target thread. Only one of these
2344     // can be active at a time:
2345     AtomicInteger* active_suspendall_barrier GUARDED_BY(Locks::thread_suspend_count_lock_);
2346     // 2. For individual thread suspensions, active barriers are embedded in a struct that is used
2347     // to link together all suspend requests for this thread. Unlike the SuspendAll case, each
2348     // barrier is referenced by a single target thread, and thus can appear only on a single list.
2349     // The struct as a whole is still stored on the requesting thread's stack.
2350     WrappedSuspend1Barrier* active_suspend1_barriers GUARDED_BY(Locks::thread_suspend_count_lock_);
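    // Illustrative sketch (not part of this header): "arriving" at either kind of barrier is
    // essentially an atomic decrement of an integer living on the requesting thread's stack;
    // the requester waits until the count reaches zero. The helper name is hypothetical and
    // AtomicInteger is assumed to behave like std::atomic<int32_t>:
    //
    //   void PassSuspendBarrier(AtomicInteger* barrier) {
    //     // Release ordering so the requester observes this thread's prior state changes.
    //     barrier->fetch_sub(1, std::memory_order_release);
    //   }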
2351 
2352     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
2353     // potentially better performance.
2354     uint8_t* thread_local_pos;
2355     uint8_t* thread_local_end;
2356 
2357     // Thread-local allocation pointer. Can be moved above the preceding two to correct alignment.
2358     uint8_t* thread_local_start;
2359 
2360     // The thread-local limit is how far we can expand the thread-local buffer; it is greater
2361     // than or equal to thread_local_end.
2362     uint8_t* thread_local_limit;
2363 
2364     size_t thread_local_objects;
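    // Illustrative sketch (not part of this header): these fields support bump-pointer
    // thread-local allocation. A minimal fast-path sketch with a hypothetical helper name,
    // ignoring alignment and instrumentation concerns:
    //
    //   void* TryTlabAlloc(size_t bytes) {
    //     uint8_t* pos = thread_local_pos;
    //     if (static_cast<size_t>(thread_local_end - pos) >= bytes) {
    //       thread_local_pos = pos + bytes;
    //       ++thread_local_objects;
    //       return pos;
    //     }
    //     return nullptr;  // Slow path may grow the buffer, up to thread_local_limit.
    //   }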
2365 
2366     // Pending checkpoint function, or null if none is pending. If this checkpoint is set and
2367     // someone requests another checkpoint, the new request goes to the checkpoint overflow list.
2368     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
2369 
2370     // Entrypoint function pointers.
2371     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
2372     JniEntryPoints jni_entrypoints;
2373     QuickEntryPoints quick_entrypoints;
2374 
2375     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
2376     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
2377 
2378     // Thread-local allocation stack data/routines.
2379     StackReference<mirror::Object>* thread_local_alloc_stack_top;
2380     StackReference<mirror::Object>* thread_local_alloc_stack_end;
2381 
2382     // Pointer to the mutator lock.
2383     // This is the same as `Locks::mutator_lock_` but cached for faster state transitions.
2384     MutatorMutex* mutator_lock;
2385 
2386     // Support for Mutex lock hierarchy bug detection.
2387     BaseMutex* held_mutexes[kLockLevelCount];
2388 
2389     // The function used for thread flip.  Set while holding Locks::thread_suspend_count_lock_ and
2390     // with all other threads suspended.  May be cleared while being read.
2391     std::atomic<Closure*> flip_function;
2392 
2393     union {
2394       // Thread-local mark stack for the concurrent copying collector.
2395       gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
2396       // Thread-local page-sized buffer for userfaultfd GC.
2397       uint8_t* thread_local_gc_buffer;
2398     };
2399 
2400     // The pending async-exception or null.
2401     mirror::Throwable* async_exception;
2402 
2403     // Top of the linked-list for reflective-handle scopes or null if none.
2404     BaseReflectiveHandleScope* top_reflective_handle_scope;
2405 
2406     // Pointer to a thread-local buffer for method tracing.
2407     uintptr_t* method_trace_buffer;
2408 
2409     // Pointer to the current entry in the buffer.
2410     uintptr_t* method_trace_buffer_curr_entry;
2411 
2412     // Pointer to the first node of an intrusive doubly-linked list of ThreadExitFlags.
2413     ThreadExitFlag* thread_exit_flags GUARDED_BY(Locks::thread_list_lock_);
2414 
2415     // If no_thread_suspension_ is > 0, what is causing that assertion.
2416     const char* last_no_thread_suspension_cause;
2417 
2418     // If the thread is asserting that there should be no transaction checks,
2419     // what is causing that assertion (debug builds only).
2420     const char* last_no_transaction_checks_cause;
2421   } tlsPtr_;
2422 
2423   // Small thread-local cache to be used from the interpreter.
2424   // It is keyed by dex instruction pointer.
2425   // The value is opcode-dependent (e.g. a field offset).
2426   InterpreterCache interpreter_cache_;
2427 
2428   // Fields below this line must not be accessed by native code. This means these fields can
2429   // be modified, rearranged, added, or removed without having to modify asm_support.h.
2430 
2431   // Guards the 'wait_monitor_' members.
2432   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
2433 
2434   // Condition variable waited upon during a wait.
2435   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
2436   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
2437   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
2438 
2439   // Debug disable-read-barrier count; only checked in debug builds, and only in the runtime.
2440   uint8_t debug_disallow_read_barrier_ = 0;
2441 
2442   // Counters used only for debugging and error reporting.  Likely to wrap.  Small to avoid
2443   // increasing Thread size.
2444   // We currently maintain these unconditionally, since it doesn't cost much, and we seem to have
2445   // persistent issues with suspension timeouts, which these should help to diagnose.
2446   // TODO: Reconsider this.
2447   std::atomic<uint8_t> suspended_count_ = 0;   // Number of times we entered a suspended state after
2448                                                // running checkpoints.
2449   std::atomic<uint8_t> checkpoint_count_ = 0;  // Number of checkpoints we started running.
2450 
2451   // Note that it is not in the packed struct; it may not be accessed for cross compilation.
2452   uintptr_t poison_object_cookie_ = 0;
2453 
2454   // Pending extra checkpoints if checkpoint_function_ is already used.
2455   std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
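  // Illustrative sketch (not part of this header): how a new checkpoint request might be
  // queued while checkpoint_function is already occupied. The helper name is hypothetical
  // and Locks::thread_suspend_count_lock_ is assumed to be held by the caller:
  //
  //   void QueueCheckpoint(Closure* closure) {
  //     if (tlsPtr_.checkpoint_function == nullptr) {
  //       tlsPtr_.checkpoint_function = closure;
  //     } else {
  //       checkpoint_overflow_.push_back(closure);
  //     }
  //   }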
2456 
2457   // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
2458   // compiled code or entrypoints.
2459   SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
2460       GUARDED_BY(Locks::custom_tls_lock_);
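  // Illustrative sketch (not part of this header): a plugin can attach per-thread state by
  // subclassing TLSData; the destructor runs at thread shutdown. The accessor names below
  // (SetCustomTLS/GetCustomTLS) are assumptions about the public API of this class.
  //
  //   class MyPluginData : public TLSData {
  //    public:
  //     explicit MyPluginData(int value) : value_(value) {}
  //     int value_;
  //   };
  //   // self->SetCustomTLS("my-plugin-key", new MyPluginData(42));
  //   // auto* data = static_cast<MyPluginData*>(self->GetCustomTLS("my-plugin-key"));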
2461 
2462 #if !defined(__BIONIC__)
2463 #if !defined(ANDROID_HOST_MUSL)
2464     __attribute__((tls_model("initial-exec")))
2465 #endif
2466   static thread_local Thread* self_tls_;
2467 #endif
2468 
2469   // True if the thread is some form of runtime thread (e.g. GC or JIT).
2470   bool is_runtime_thread_;
2471 
2472   // Set during execution of JNI methods that get field and method IDs as part of determining if
2473   // the caller is allowed to access all fields and methods in the Core Platform API.
2474   uint32_t core_platform_api_cookie_ = 0;
2475 
2476   friend class gc::collector::SemiSpace;  // For getting stack traces.
2477   friend class Runtime;  // For CreatePeer.
2478   friend class QuickExceptionHandler;  // For dumping the stack.
2479   friend class ScopedAssertNoTransactionChecks;
2480   friend class ScopedThreadStateChange;
2481   friend class StubTest;  // For accessing entrypoints.
2482   friend class ThreadList;  // For ~Thread, Destroy and EnsureFlipFunctionStarted.
2483   friend class EntrypointsOrderTest;  // To test the order of tls entries.
2484   friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.
2485 
2486   DISALLOW_COPY_AND_ASSIGN(Thread);
2487 };
2488 
2489 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
2490  public:
2491   ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
2492                                                bool enabled = true)
2493       ACQUIRE(Roles::uninterruptible_)
2494       : enabled_(enabled) {
2495     if (!enabled_) {
2496       return;
2497     }
2498     if (kIsDebugBuild) {
2499       self_ = Thread::Current();
2500       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
2501     } else {
2502       Roles::uninterruptible_.Acquire();  // No-op.
2503     }
2504   }
2505   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
2506     if (!enabled_) {
2507       return;
2508     }
2509     if (kIsDebugBuild) {
2510       self_->EndAssertNoThreadSuspension(old_cause_);
2511     } else {
2512       Roles::uninterruptible_.Release();  // No-op.
2513     }
2514   }
2515 
2516  private:
2517   Thread* self_;
2518   const bool enabled_;
2519   const char* old_cause_;
2520 };
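// Illustrative usage sketch (hypothetical call site): in debug builds the scope records a
// cause and asserts that the current thread does not suspend while the object is alive.
//
//   {
//     ScopedAssertNoThreadSuspension sants("Walking raw mirror::Object pointers");
//     // ... code that must not reach a suspend point ...
//   }  // Assertion and the uninterruptible role are released here.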
2521 
2522 class ScopedAllowThreadSuspension {
2523  public:
2524   ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
2525     if (kIsDebugBuild) {
2526       self_ = Thread::Current();
2527       old_cause_ = self_->EndAssertNoThreadSuspension();
2528     } else {
2529       Roles::uninterruptible_.Release();  // No-op.
2530     }
2531   }
2532   ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
2533     if (kIsDebugBuild) {
2534       CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
2535     } else {
2536       Roles::uninterruptible_.Acquire();  // No-op.
2537     }
2538   }
2539 
2540  private:
2541   Thread* self_;
2542   const char* old_cause_;
2543 };
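// Illustrative usage sketch (hypothetical call site): temporarily re-allow suspension inside
// an outer ScopedAssertNoThreadSuspension region.
//
//   {
//     ScopedAssertNoThreadSuspension sants("No suspension while caching state");
//     // ...
//     {
//       ScopedAllowThreadSuspension sats;
//       // Suspension (e.g. a GC point) is permitted again within this inner scope.
//     }
//     // ...
//   }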
2544 
2545 
2546 class ScopedStackedShadowFramePusher {
2547  public:
2548   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf) : self_(self), sf_(sf) {
2549     DCHECK_EQ(sf->GetLink(), nullptr);
2550     self_->PushStackedShadowFrame(sf, StackedShadowFrameType::kShadowFrameUnderConstruction);
2551   }
2552   ~ScopedStackedShadowFramePusher() {
2553     ShadowFrame* sf = self_->PopStackedShadowFrame();
2554     DCHECK_EQ(sf, sf_);
2555   }
2556 
2557  private:
2558   Thread* const self_;
2559   ShadowFrame* const sf_;
2560 
2561   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
2562 };
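// Illustrative usage sketch (hypothetical call site): push a shadow frame that is still under
// construction so the GC can visit its references, popping it automatically on scope exit.
//
//   {
//     ScopedStackedShadowFramePusher pusher(self, shadow_frame);
//     // ... populate the shadow frame's vregs; a GC in this window will see them ...
//   }  // Destructor pops the frame and checks it matches the one pushed.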
2563 
2564 // Only works for debug builds.
2565 class ScopedDebugDisallowReadBarriers {
2566  public:
2567   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
2568     self_->ModifyDebugDisallowReadBarrier(1);
2569   }
2570   ~ScopedDebugDisallowReadBarriers() {
2571     self_->ModifyDebugDisallowReadBarrier(-1);
2572   }
2573 
2574  private:
2575   Thread* const self_;
2576 };
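// Illustrative usage sketch (hypothetical call site, debug builds only): mark a region in
// which read barriers are not expected so that any that do run can be flagged.
//
//   {
//     ScopedDebugDisallowReadBarriers sddrb(self);
//     // ... code that must not trigger read barriers ...
//   }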
2577 
2578 class ThreadLifecycleCallback {
2579  public:
2580   virtual ~ThreadLifecycleCallback() {}
2581 
2582   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2583   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2584 };
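// Illustrative sketch (not part of this header): a component can observe thread lifetime by
// implementing this interface; registration with the runtime's callback list is not shown.
//
//   class ThreadLoggingCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "Thread started: " << *self;
//     }
//     void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "Thread exiting: " << *self;
//     }
//   };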
2585 
2586 // Stores an exception from the thread and suppresses it for the lifetime of this object.
2587 class ScopedExceptionStorage {
2588  public:
2589   EXPORT explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
2590   void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
2591   EXPORT ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);
2592 
2593  private:
2594   Thread* self_;
2595   StackHandleScope<1> hs_;
2596   MutableHandle<mirror::Throwable> excp_;
2597 };
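// Illustrative usage sketch (hypothetical call site): stash the pending exception, run code
// that may itself throw, then either suppress the stored exception or let the destructor
// presumably re-install it on scope exit.
//
//   {
//     ScopedExceptionStorage ses(self);        // Stores and clears the pending exception.
//     DoSomethingThatMayThrow(self);           // Hypothetical call.
//     ses.SuppressOldException("superseded");  // Optional: drop the stored exception.
//   }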
2598 
2599 EXPORT std::ostream& operator<<(std::ostream& os, const Thread& thread);
2600 std::ostream& operator<<(std::ostream& os, StackedShadowFrameType type);
2601 
2602 }  // namespace art
2603 
2604 #endif  // ART_RUNTIME_THREAD_H_
2605