/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ROOT_H_
#define ART_RUNTIME_GC_ROOT_H_

#include "base/locks.h"       // For Locks::mutator_lock_.
#include "base/macros.h"
#include "mirror/object_reference.h"
#include "read_barrier_option.h"

namespace art HIDDEN {
class ArtField;
class ArtMethod;
template<class MirrorType> class ObjPtr;

namespace mirror {
class Object;
}  // namespace mirror

template <size_t kBufferSize>
class BufferedRootVisitor;

// Dependent on pointer size so that we don't have frames that are too big on 64-bit targets.
static const size_t kDefaultBufferedRootCount = 1024 / sizeof(void*);
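// For example, with 8-byte pointers this is 1024 / 8 == 128 buffered roots
// (256 with 4-byte pointers), keeping the on-stack buffer at about 1 KiB.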

enum RootType {
  kRootUnknown = 0,
  kRootJNIGlobal,
  kRootJNILocal,
  kRootJavaFrame,
  kRootNativeStack,
  kRootStickyClass,
  kRootThreadBlock,
  kRootMonitorUsed,
  kRootThreadObject,
  kRootInternedString,
  kRootFinalizing,  // used for HPROF's conversion to HprofHeapTag
  kRootDebugger,
  kRootReferenceCleanup,  // used for HPROF's conversion to HprofHeapTag
  kRootVMInternal,
  kRootJNIMonitor,
};
EXPORT std::ostream& operator<<(std::ostream& os, RootType root_type);

// Only used by hprof; thread_id_ and type_ have no other consumers.
class RootInfo {
 public:
  // Thread id 0 is for non-thread roots.
  explicit RootInfo(RootType type, uint32_t thread_id = 0)
      : type_(type), thread_id_(thread_id) {
  }
  RootInfo(const RootInfo&) = default;
  virtual ~RootInfo() {
  }
  RootType GetType() const {
    return type_;
  }
  uint32_t GetThreadId() const {
    return thread_id_;
  }
  virtual void Describe(std::ostream& os) const {
    os << "Type=" << type_ << " thread_id=" << thread_id_;
  }
  std::string ToString() const;

 private:
  const RootType type_;
  const uint32_t thread_id_;
};

inline std::ostream& operator<<(std::ostream& os, const RootInfo& root_info) {
  root_info.Describe(os);
  return os;
}

// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
// (no logical reason to do this). You also may not start logging new roots and stop logging new
// roots (also no logical reason to do this).
//
// The precise flag ensures that more metadata is supplied. An example is vreg data for compiled
// method frames.
enum VisitRootFlags : uint8_t {
  kVisitRootFlagAllRoots = (1 << 0),
  kVisitRootFlagNewRoots = (1 << 1),
  kVisitRootFlagStartLoggingNewRoots = (1 << 2),
  kVisitRootFlagStopLoggingNewRoots = (1 << 3),
  kVisitRootFlagClearRootLog = (1 << 4),
  kVisitRootFlagClassLoader = (1 << 5),
  // There is no (1 << 6).
  kVisitRootFlagPrecise = (1 << 7),
};
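
// Purely illustrative sketch of the contract above (this particular valid
// combination is an assumption, not an exhaustive list): visiting everything
// while clearing the log, e.g.
//   static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagClearRootLog),
// is consistent with the rules, whereas combining kVisitRootFlagAllRoots with
// kVisitRootFlagNewRoots, or kVisitRootFlagStartLoggingNewRoots with
// kVisitRootFlagStopLoggingNewRoots, is disallowed as described.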

class RootVisitor {
 public:
  virtual ~RootVisitor() { }

  // Single root version, not overridable.
  ALWAYS_INLINE void VisitRoot(mirror::Object** root, const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    VisitRoots(&root, 1, info);
  }

  // Single root version, not overridable.
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** root, const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (*root != nullptr) {
      VisitRoot(root, info);
    }
  }

  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
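
// Minimal sketch (not part of this header) of a RootVisitor implementation:
// a visitor that only counts roots. Both batch overloads receive arrays of
// pointers to root slots, so an updating visitor could also rewrite the slots
// in place. `CountingRootVisitor` is a hypothetical name.
//
//   class CountingRootVisitor : public RootVisitor {
//    public:
//     void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {
//       count_ += count;
//     }
//     void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
//                     const RootInfo& info) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {
//       count_ += count;
//     }
//     size_t count_ = 0;
//   };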

// Only visits roots one at a time and doesn't handle updating roots. Used when performance
// isn't critical.
class SingleRootVisitor : public RootVisitor {
 private:
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      VisitRoot(*roots[i], info);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      VisitRoot(roots[i]->AsMirrorPtr(), info);
    }
  }

  virtual void VisitRoot(mirror::Object* root, const RootInfo& info) = 0;
};
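
// Illustrative sketch (assumed subclass, not part of this header): with
// SingleRootVisitor only the single-root hook needs implementing; the batch
// overloads above fan out to it. `DumpRootVisitor` is a hypothetical name.
//
//   class DumpRootVisitor : public SingleRootVisitor {
//     void VisitRoot(mirror::Object* root, const RootInfo& info) override {
//       LOG(INFO) << info << " root=" << root;
//     }
//   };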

class GcRootSource {
 public:
  GcRootSource()
      : field_(nullptr), method_(nullptr) {
  }
  explicit GcRootSource(ArtField* field)
      : field_(field), method_(nullptr) {
  }
  explicit GcRootSource(ArtMethod* method)
      : field_(nullptr), method_(method) {
  }
  ArtField* GetArtField() const {
    return field_;
  }
  ArtMethod* GetArtMethod() const {
    return method_;
  }
  bool HasArtField() const {
    return field_ != nullptr;
  }
  bool HasArtMethod() const {
    return method_ != nullptr;
  }

 private:
  ArtField* const field_;
  ArtMethod* const method_;

  DISALLOW_COPY_AND_ASSIGN(GcRootSource);
};

// A small CompressedReference wrapper class that makes it harder to forget about read barriers.
// Used for references that are roots for an object graph, whether or not they are actually traced
// from. Requires an explicit VisitRoots call for tracing. See also Handle (implicitly traced by a
// GC) and StackReference (traced explicitly, but not as the result of a read barrier).
template<class MirrorType>
class GcRoot {
 public:
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // TODO: This is often called repeatedly from functions to process an explicit array of roots.
  // And it calls a function that takes an array of roots. By processing a single root at a time
  // here and turning it into a 1-element array, do we lose performance? Or does the compiler
  // eliminate the extra work?
  void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsNull());
    mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
    visitor->VisitRoots(roots, 1u, info);
    DCHECK(!IsNull());
  }

  void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!IsNull()) {
      VisitRoot(visitor, info);
    }
  }

  ALWAYS_INLINE mirror::CompressedReference<mirror::Object>* AddressWithoutBarrier() {
    return &root_;
  }

  ALWAYS_INLINE bool IsNull() const {
    // It's safe to null-check it without a read barrier.
    return root_.IsNull();
  }

  ALWAYS_INLINE GcRoot() : GcRoot(nullptr) {}
  ALWAYS_INLINE GcRoot(std::nullptr_t) : root_() {
    DCHECK(IsNull());
  }
  explicit ALWAYS_INLINE GcRoot(mirror::CompressedReference<mirror::Object> ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  explicit ALWAYS_INLINE GcRoot(MirrorType* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  explicit ALWAYS_INLINE GcRoot(ObjPtr<MirrorType> ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
  // CompressedReference<mirror::Object> here since it violates strict aliasing requirements to
  // cast CompressedReference<MirrorType>* to CompressedReference<mirror::Object>*.
  mutable mirror::CompressedReference<mirror::Object> root_;

  template <size_t kBufferSize> friend class BufferedRootVisitor;
};
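
// Illustrative sketch (hypothetical holder class, not part of this header):
// a native-side holder keeps a GcRoot, reads through it (applying the read
// barrier), and reports it during root visiting.
//
//   class InternCache {
//    public:
//     mirror::Object* Get() REQUIRES_SHARED(Locks::mutator_lock_) {
//       return root_.Read();
//     }
//     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
//       root_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
//     }
//    private:
//     GcRoot<mirror::Object> root_;
//   };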

// Simple data structure for buffered root visiting to avoid virtual dispatch overhead.
// Currently only for CompressedReferences, since these are more common than Object** roots,
// which are used only for thread-local roots.
template <size_t kBufferSize>
class BufferedRootVisitor {
 public:
  BufferedRootVisitor(RootVisitor* visitor, const RootInfo& root_info)
      : visitor_(visitor), root_info_(root_info), buffer_pos_(0) {
  }

  ~BufferedRootVisitor() {
    Flush();
  }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root.IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(GcRoot<MirrorType>& root) REQUIRES_SHARED(Locks::mutator_lock_) {
    VisitRoot(root.AddressWithoutBarrier());
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
      Flush();
    }
    roots_[buffer_pos_++] = root;
  }

  void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
    buffer_pos_ = 0;
  }

 private:
  RootVisitor* const visitor_;
  RootInfo root_info_;
  mirror::CompressedReference<mirror::Object>* roots_[kBufferSize];
  size_t buffer_pos_;
};

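// Illustrative usage sketch (assumed caller code, not part of this header):
// many roots are funneled through one virtual VisitRoots call per kBufferSize
// roots instead of one call per root.
//
//   BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
//       visitor, RootInfo(kRootVMInternal));
//   for (GcRoot<mirror::Object>& root : roots) {
//     buffered_visitor.VisitRootIfNonNull(root);
//   }
//   // Any remaining buffered roots are flushed by the destructor.

// Unbuffered counterpart to BufferedRootVisitor: forwards each root to the
// underlying RootVisitor immediately, one virtual call per root.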
class UnbufferedRootVisitor {
 public:
  UnbufferedRootVisitor(RootVisitor* visitor, const RootInfo& root_info)
      : visitor_(visitor), root_info_(root_info) {}

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root.IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(GcRoot<MirrorType>& root) const REQUIRES_SHARED(Locks::mutator_lock_) {
    VisitRoot(root.AddressWithoutBarrier());
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoots(&root, 1, root_info_);
  }

 private:
  RootVisitor* const visitor_;
  RootInfo root_info_;
};

}  // namespace art

#endif  // ART_RUNTIME_GC_ROOT_H_