/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <cstdint>
#include <functional>
#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "android-base/thread_annotations.h"
#include "base/arena_containers.h"
#include "base/array_ref.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "compilation_kind.h"
#include "jit_memory_region.h"
#include "profiling_info.h"

namespace art HIDDEN {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class MarkCodeClosure;

// Type of bitmap used for tracking live functions in the JIT code cache for the purposes
// of garbage collecting code.
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;

// The state of profile-based compilation in the zygote.
// - kInProgress:      JIT compilation is happening.
// - kDone:            JIT compilation is finished, and the zygote is preparing to
//                     notify the other processes.
// - kNotifiedOk:      the zygote has notified the other processes, which can start
//                     sharing the boot image method mappings.
// - kNotifiedFailure: the zygote has notified the other processes, but they
//                     cannot share the boot image method mappings due to
//                     unexpected errors.
enum class ZygoteCompilationState : uint8_t {
  kInProgress = 0,
  kDone = 1,
  kNotifiedOk = 2,
  kNotifiedFailure = 3,
};

// Class abstraction over a map of ArtMethod -> compiled code, where the
// ArtMethods are compiled by the zygote, and the map acts as a communication
// channel between the zygote and the other processes.
// For the zygote process, this map is the only place it stores compiled code;
// JitCodeCache.method_code_map_ is empty.
//
// This map is writable only by the zygote, and readable by all children.
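//
// A minimal usage sketch (illustrative only; the runtime wires this up
// through JitCodeCache, and the locking shown is assumed from the
// annotations below):
//
//   // Zygote side: publish compiled code, then advance the state.
//   ZygoteMap* map = code_cache->GetZygoteMap();
//   map->Put(code_ptr, method);  // With Locks::jit_lock_ held.
//   map->SetCompilationState(ZygoteCompilationState::kDone);
//
//   // Child side: reuse shared code instead of recompiling, once notified.
//   if (map->CanMapBootImageMethods() && map->ContainsMethod(method)) {
//     const void* code = map->GetCodeFor(method);
//   }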
class ZygoteMap {
 public:
  struct Entry {
    ArtMethod* method;
    // Note that we currently only allocate code in the low 4GiB, so we could
    // reserve just 4 bytes for the code pointer. For simplicity, and in case
    // we move to 64-bit code addresses, keep it a void* for now.
    const void* code_ptr;
  };

  explicit ZygoteMap(JitMemoryRegion* region)
      : map_(), region_(region), compilation_state_(nullptr) {}

  // Initialize the data structure so it can hold `number_of_methods` mappings.
  // Note that the map is fixed size and never grows.
  void Initialize(uint32_t number_of_methods) REQUIRES(!Locks::jit_lock_);

  // Add the mapping method -> code.
  void Put(const void* code, ArtMethod* method) REQUIRES(Locks::jit_lock_);

  // Return the code pointer for the given method. If `pc` is not zero, check
  // that the pc falls into that code range. Return null otherwise.
  const void* GetCodeFor(ArtMethod* method, uintptr_t pc = 0) const;

  // Return whether the map has associated code for the given method.
  bool ContainsMethod(ArtMethod* method) const {
    return GetCodeFor(method) != nullptr;
  }

  void SetCompilationState(ZygoteCompilationState state) {
    DCHECK_LT(static_cast<uint8_t>(*compilation_state_), static_cast<uint8_t>(state));
    region_->WriteData(compilation_state_, state);
  }

  bool IsCompilationDoneButNotNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ == ZygoteCompilationState::kDone;
  }

  bool IsCompilationNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ > ZygoteCompilationState::kDone;
  }

  bool CanMapBootImageMethods() const {
    return compilation_state_ != nullptr &&
           *compilation_state_ == ZygoteCompilationState::kNotifiedOk;
  }

  ArrayRef<const Entry>::const_iterator cbegin() const {
    return map_.cbegin();
  }
  ArrayRef<const Entry>::iterator begin() {
    return map_.begin();
  }
  ArrayRef<const Entry>::const_iterator cend() const {
    return map_.cend();
  }
  ArrayRef<const Entry>::iterator end() {
    return map_.end();
  }

 private:
  // The map allocated with `region_`.
  ArrayRef<const Entry> map_;

  // The region in which the map is allocated.
  JitMemoryRegion* const region_;

  // The current state of compilation in the zygote. Starts with kInProgress,
  // and should end with kNotifiedOk or kNotifiedFailure.
  const ZygoteCompilationState* compilation_state_;

  DISALLOW_COPY_AND_ASSIGN(ZygoteMap);
};

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;

  // Default initial capacity of the JIT code cache.
  static size_t GetInitialCapacity() {
    // This function is called during static initialization
    // when gPageSize might not be available yet.
    const size_t page_size = GetPageSizeSlow();

    // Set the default to a very low amount for debug builds to stress the
    // code cache collection. It should be at least two pages, however, as the
    // storage is split into data and code sections whose sizes should each be
    // page-aligned, since that is the unit mspaces use.
    // See also JitMemoryRegion::Initialize.
    return std::max(kIsDebugBuild ? 8 * KB : 64 * KB, 2 * page_size);
  }
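  // For illustration (values follow from the constants above): with 4 KiB
  // pages, a release build starts at max(64 KB, 8 KB) = 64 KB and a debug
  // build at max(8 KB, 8 KB) = 8 KB; with 16 KiB pages, the two-page floor
  // wins in debug builds: max(8 KB, 32 KB) = 32 KB.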
  // Create the code cache; an error message is passed in the out arg
  // `error_msg` on failure.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
  ~JitCodeCache();

  bool NotifyCompilationOf(ArtMethod* method,
                           Thread* self,
                           CompilationKind compilation_kind,
                           bool prejit)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  EXPORT void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompiling(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this pc.
  EXPORT bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this pc in the private region
  // (i.e. not from the zygote).
  bool PrivateRegionContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  EXPORT bool ContainsMethod(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::jit_lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the
  // cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::jit_lock_);

  // Allocate a region for both code and data in the JIT code cache.
  // The reserved memory is left completely uninitialized.
  bool Reserve(Thread* self,
               JitMemoryRegion* region,
               size_t code_size,
               size_t stack_map_size,
               size_t number_of_roots,
               ArtMethod* method,
               /*out*/ArrayRef<const uint8_t>* reserved_code,
               /*out*/ArrayRef<const uint8_t>* reserved_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Initialize code and data of previously allocated memory.
  //
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  bool Commit(Thread* self,
              JitMemoryRegion* region,
              ArtMethod* method,
              ArrayRef<const uint8_t> reserved_code,  // Uninitialized destination.
              ArrayRef<const uint8_t> code,  // Compiler output (source).
              ArrayRef<const uint8_t> reserved_data,  // Uninitialized destination.
              const std::vector<Handle<mirror::Object>>& roots,
              ArrayRef<const uint8_t> stack_map,  // Compiler output (source).
              const std::vector<uint8_t>& debug_info,
              bool is_full_debug_info,
              CompilationKind compilation_kind,
              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);
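  // A rough sketch of the two-phase allocation protocol above (illustrative
  // only; the JIT compiler is the real caller, and the sizes and buffers
  // shown are assumed placeholders):
  //
  //   ArrayRef<const uint8_t> reserved_code;
  //   ArrayRef<const uint8_t> reserved_data;
  //   if (code_cache->Reserve(self, region, code_size, stack_map_size,
  //                           number_of_roots, method,
  //                           &reserved_code, &reserved_data)) {
  //     if (!code_cache->Commit(self, region, method, reserved_code, code,
  //                             reserved_data, roots, stack_map, debug_info,
  //                             is_full_debug_info, compilation_kind,
  //                             cha_single_implementation_list)) {
  //       // On failure, return the reservation to the cache (see Free below).
  //       code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
  //     }
  //   }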
  // Free the previously allocated memory regions.
  void Free(Thread* self, JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);
  void FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
      REQUIRES(Locks::jit_lock_);

  void IncreaseCodeCacheCapacity(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // 'method' may be null when LookupMethodHeader is called from
  // MarkCodeClosure::Run() in debug builds. Return null if 'pc' is not in the
  // code cache.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the method from the cache for testing purposes. The caller must
  // ensure that all threads are suspended and that the method is not on any
  // thread's stack.
  EXPORT bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic,
                           /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& inline_cache_entries,
                                  const std::vector<uint32_t>& branch_cache_entries)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
  }

  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given
  // dex locations. Saves inline caches for a method if its hotness meets
  // `inline_cache_threshold` after being baseline compiled.
  EXPORT void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                                 std::vector<ProfileMethodInfo>& methods,
                                 uint16_t inline_cache_threshold) REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
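  // For example (a hedged sketch; the profile saver is the actual caller, and
  // the location string and threshold below are assumed placeholders):
  //
  //   std::set<std::string> locations = {"/data/app/example/base.apk"};
  //   std::vector<ProfileMethodInfo> methods;
  //   code_cache->GetProfiledMethods(locations, methods, /*inline_cache_threshold=*/ 100);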
  EXPORT void InvalidateAllCompiledCode()
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);
  void DumpAllCompiledMethods(std::ostream& os)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Visit GC roots (except j.l.Class and j.l.String) held by JIT-ed code.
  template<typename RootVisitorType>
  EXPORT void VisitRootTables(ArtMethod* method, RootVisitorType& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes
  // them, to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being
  // moved to the pointer 'new_method' since it is being made obsolete.
  EXPORT void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  EXPORT void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);

  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }

  ZygoteMap* GetZygoteMap() {
    return &zygote_map_;
  }

  // Fetch the code of a method that was JIT-compiled, but whose entrypoint the
  // JIT could not update due to the resolution trampoline.
  const void* GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT compiled methods that belong in the zygote
  // space. This is used for removing non-debuggable JIT code at the point we
  // realize the runtime is debuggable. Also clear the Precompiled flag from
  // all methods so the non-debuggable code doesn't come back.
  EXPORT void TransitionToDebuggable() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  JitMemoryRegion* GetCurrentRegion();
  bool IsSharedRegion(const JitMemoryRegion& region) const {
    return &region == &shared_region_;
  }
  bool CanAllocateProfilingInfo() {
    // If we don't have a private region, we cannot allocate a profiling info.
    // In general, a shared region does not support GC objects, which a
    // profiling info can reference.
    JitMemoryRegion* region = GetCurrentRegion();
    return region->IsValid() && !IsSharedRegion(*region);
  }
  // Return whether the given `ptr` is in the zygote executable memory space.
  bool IsInZygoteExecSpace(const void* ptr) const {
    return shared_region_.IsInExecSpace(ptr);
  }

  ProfilingInfo* GetProfilingInfo(ArtMethod* method, Thread* self);
  void MaybeUpdateInlineCache(ArtMethod* method,
                              uint32_t dex_pc,
                              ObjPtr<mirror::Class> cls,
                              Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS because we may be called with the JIT lock held
  // or not. The implementation of this method handles the two cases.
  void AddZombieCode(ArtMethod* method, const void* code_ptr) NO_THREAD_SAFETY_ANALYSIS;

  EXPORT void DoCollection(Thread* self)
      REQUIRES(!Locks::jit_lock_);

 private:
  JitCodeCache();

  void AddZombieCodeInternal(ArtMethod* method, const void* code_ptr)
      REQUIRES(Locks::jit_mutator_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& inline_cache_entries,
                                          const std::vector<uint32_t>& branch_cache_entries)
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(Locks::jit_lock_) REQUIRES_SHARED(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes the method from the cache. The caller must ensure that all threads
  // are suspended and that the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Call the given callback for every compiled method in the code cache.
  void VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb)
      REQUIRES_SHARED(Locks::jit_mutator_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr)
      REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Return whether the code cache's capacity is at its maximum.
  bool IsAtMaxCapacity() const REQUIRES(Locks::jit_lock_);
  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  bool IsInZygoteDataSpace(const void* ptr) const {
    return shared_region_.IsInDataSpace(ptr);
  }

  size_t GetReservedCapacity() {
    return reserved_capacity_;
  }

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr);

  class JniStubKey;
  class JniStubData;

  // Whether the GC allows accessing weak references in inline caches. Note
  // that this is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);

  // Reserved capacity of the JIT code cache.
  const size_t reserved_capacity_;

  // By default, do not GC until reaching four times the initial capacity.
  static constexpr size_t kReservedCapacityMultiplier = 4;

  // -------------- JIT memory regions ------------------------------------- //

  // Shared region, inherited from the zygote.
  JitMemoryRegion shared_region_;

  // Process's own region.
  JitMemoryRegion private_region_;

  // -------------- Global JIT maps --------------------------------------- //

  // Note: The methods held in these maps may be dead, so we must ensure that we do not use
  // read barriers on their declaring classes as that could unnecessarily keep them alive or
  // crash the GC, depending on the GC phase and particular GC's details. Asserting that we
  // do not emit read barriers for these methods can be tricky as we're allowed to emit read
  // barriers for other methods that are known to be alive, such as the method being compiled.
  // The GC must ensure that methods in these maps are cleaned up with `RemoveMethodsIn()`
  // before the declaring class memory is freed.

  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_mutator_lock_);

  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_mutator_lock_);
  // Subset of `method_code_map_`, but keyed by `ArtMethod*`. Used to treat certain
  // objects (like `MethodType`-s) as strongly reachable from the corresponding ArtMethod.
  SafeMap<ArtMethod*, std::vector<const void*>> method_code_map_reversed_
      GUARDED_BY(Locks::jit_mutator_lock_);

  // Holds compiled code associated with the ArtMethod. Used when pre-jitting
  // methods whose entrypoints have the resolution stub.
  SafeMap<ArtMethod*, const void*> saved_compiled_methods_map_
      GUARDED_BY(Locks::jit_mutator_lock_);

  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_mutator_lock_);
  // Zombie code and JNI methods to consider for collection.
  std::set<const void*> zombie_code_ GUARDED_BY(Locks::jit_mutator_lock_);
  std::set<ArtMethod*> zombie_jni_code_ GUARDED_BY(Locks::jit_mutator_lock_);

  // ProfilingInfo objects we have allocated. Mutators don't need to access
  // these, so this can be guarded by the JIT lock.
  SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);

  // Methods that the zygote has compiled and can be shared across processes
  // forked from the zygote.
  ZygoteMap zygote_map_;

  // -------------- JIT GC related data structures ----------------------- //

  // Condition to wait on during collection and for accessing weak references in inline caches.
  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);

  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);

  // Whether a GC task is already scheduled.
  std::atomic<bool> gc_task_scheduled_;

  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);

  // Zombie code being processed by the GC.
  std::set<const void*> processed_zombie_code_ GUARDED_BY(Locks::jit_lock_);
  std::set<ArtMethod*> processed_zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);

  // ---------------- JIT statistics -------------------------------------- //

  // Number of baseline compilations done throughout the lifetime of the JIT.
  size_t number_of_baseline_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of optimized compilations done throughout the lifetime of the JIT.
  size_t number_of_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);

  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_