xref: /aosp_15_r20/art/runtime/jit/jit_code_cache.cc (revision 795d594fd825385562da6b089ea9b2033f3abf5a)
1 /*
2  * Copyright 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "jit_code_cache.h"
18 
19 #include <sstream>
20 
21 #include <android-base/logging.h>
22 
23 #include "arch/context.h"
24 #include "art_method-inl.h"
25 #include "base/histogram-inl.h"
26 #include "base/logging.h"  // For VLOG.
27 #include "base/membarrier.h"
28 #include "base/memfd.h"
29 #include "base/mem_map.h"
30 #include "base/pointer_size.h"
31 #include "base/quasi_atomic.h"
32 #include "base/stl_util.h"
33 #include "base/systrace.h"
34 #include "base/time_utils.h"
35 #include "base/utils.h"
36 #include "cha.h"
37 #include "debugger_interface.h"
38 #include "dex/dex_file_loader.h"
39 #include "dex/method_reference.h"
40 #include "entrypoints/entrypoint_utils-inl.h"
41 #include "entrypoints/runtime_asm_entrypoints.h"
42 #include "gc/accounting/bitmap-inl.h"
43 #include "gc/allocator/art-dlmalloc.h"
44 #include "gc/scoped_gc_critical_section.h"
45 #include "handle.h"
46 #include "handle_scope-inl.h"
47 #include "instrumentation.h"
48 #include "intern_table.h"
49 #include "jit/jit.h"
50 #include "jit/profiling_info.h"
51 #include "jit/jit_scoped_code_cache_write.h"
52 #include "linear_alloc.h"
53 #include "mirror/method_type.h"
54 #include "oat/oat_file-inl.h"
55 #include "oat/oat_quick_method_header.h"
56 #include "object_callbacks.h"
57 #include "profile/profile_compilation_info.h"
58 #include "scoped_thread_state_change-inl.h"
59 #include "stack.h"
60 #include "thread-current-inl.h"
61 #include "thread-inl.h"
62 #include "thread_list.h"
63 #include "well_known_classes-inl.h"
64 
65 namespace art HIDDEN {
66 namespace jit {
67 
68 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
69 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
70 
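// Key under which JNI stubs are shared. Native methods whose stubs are interchangeable,
// i.e. that have the same shorty and the same static / fast-native / critical-native /
// synchronized flags, compare equal here and therefore map to a single JniStubData entry.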
71 class JitCodeCache::JniStubKey {
72  public:
73   explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
74       : shorty_(method->GetShorty()),
75         is_static_(method->IsStatic()),
76         is_fast_native_(method->IsFastNative()),
77         is_critical_native_(method->IsCriticalNative()),
78         is_synchronized_(method->IsSynchronized()) {
79     DCHECK(!(is_fast_native_ && is_critical_native_));
80   }
81 
82   bool operator<(const JniStubKey& rhs) const {
83     if (is_static_ != rhs.is_static_) {
84       return rhs.is_static_;
85     }
86     if (is_synchronized_ != rhs.is_synchronized_) {
87       return rhs.is_synchronized_;
88     }
89     if (is_fast_native_ != rhs.is_fast_native_) {
90       return rhs.is_fast_native_;
91     }
92     if (is_critical_native_ != rhs.is_critical_native_) {
93       return rhs.is_critical_native_;
94     }
95     return strcmp(shorty_, rhs.shorty_) < 0;
96   }
97 
98   // Update the shorty to point to another method's shorty. Call this function when removing
99   // the method that references the old shorty from JniStubData and not removing the entire
100   // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
101   void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
102     const char* shorty = method->GetShorty();
103     DCHECK_STREQ(shorty_, shorty);
104     shorty_ = shorty;
105   }
106 
107  private:
108   // The shorty points into the data of a DexFile and may need to change
109   // to point to the same shorty in a different DexFile.
110   mutable const char* shorty_;
111 
112   const bool is_static_;
113   const bool is_fast_native_;
114   const bool is_critical_native_;
115   const bool is_synchronized_;
116 };
117 
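// Per-JniStubKey entry of jni_stubs_map_: the compiled stub (if any) together with the
// list of native methods currently sharing it. The code is only freed once no method
// references the entry anymore.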
118 class JitCodeCache::JniStubData {
119  public:
120   JniStubData() : code_(nullptr), methods_() {}
121 
122   void SetCode(const void* code) {
123     DCHECK(code != nullptr);
124     code_ = code;
125   }
126 
127   void UpdateEntryPoints(const void* entrypoint) REQUIRES_SHARED(Locks::mutator_lock_) {
128     DCHECK(IsCompiled());
129     DCHECK(entrypoint == OatQuickMethodHeader::FromCodePointer(GetCode())->GetEntryPoint());
130     instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
131     for (ArtMethod* m : GetMethods()) {
132       // Because `m` might be in the process of being deleted,
133       //   - use the `ArtMethod::StillNeedsClinitCheckMayBeDead()` to check if
134       //     we can update the entrypoint, and
135       //   - call `Instrumentation::UpdateNativeMethodsCodeToJitCode` instead of the
136       //     more generic function `Instrumentation::UpdateMethodsCode()`.
137       // The `ArtMethod::StillNeedsClinitCheckMayBeDead()` checks the class status
138       // in the to-space object, if any, even if the method's declaring class points to
139       // the from-space class object. This way we do not miss updating an entrypoint
140       // even under uncommon circumstances, when during a GC the class becomes visibly
141       // initialized, the method becomes hot, we compile the thunk and want to update
142       // the entrypoint while the method's declaring class field still points to the
143       // from-space class object with the old status.
144       if (!m->StillNeedsClinitCheckMayBeDead()) {
145         instrum->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
146       }
147     }
148   }
149 
150   const void* GetCode() const {
151     return code_;
152   }
153 
154   bool IsCompiled() const {
155     return GetCode() != nullptr;
156   }
157 
158   void AddMethod(ArtMethod* method) {
159     if (!ContainsElement(methods_, method)) {
160       methods_.push_back(method);
161     }
162   }
163 
164   const std::vector<ArtMethod*>& GetMethods() const {
165     return methods_;
166   }
167 
168   void RemoveMethodsIn(const LinearAlloc& alloc) REQUIRES_SHARED(Locks::mutator_lock_) {
169     auto kept_end = std::partition(
170         methods_.begin(),
171         methods_.end(),
172         [&alloc](ArtMethod* method) { return !alloc.ContainsUnsafe(method); });
173     for (auto it = kept_end; it != methods_.end(); it++) {
174       VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
175     }
176     methods_.erase(kept_end, methods_.end());
177   }
178 
179   bool RemoveMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
180     auto it = std::find(methods_.begin(), methods_.end(), method);
181     if (it != methods_.end()) {
182       VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
183       methods_.erase(it);
184       return true;
185     } else {
186       return false;
187     }
188   }
189 
190   void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
191     std::replace(methods_.begin(), methods_.end(), old_method, new_method);
192   }
193 
194  private:
195   const void* code_;
196   std::vector<ArtMethod*> methods_;
197 };
198 
199 JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
200                                    bool rwx_memory_allowed,
201                                    bool is_zygote,
202                                    std::string* error_msg) {
203   // Register for membarrier expedited sync core if JIT will be generating code.
204   if (!used_only_for_profile_data) {
205     if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
206       // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
207       // flushed and it's used when adding code to the JIT. The memory used by the new code may
208       // have just been released and, in theory, the old code could still be in a pipeline.
209       VLOG(jit) << "Kernel does not support membarrier sync-core";
210     }
211   }
212 
213   Runtime* runtime = Runtime::Current();
214   size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
215   // Check whether the provided max capacity in options is below 1GB.
216   size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
217   // We need 32-bit offsets from method headers in the code cache to the things they point to
218   // in the data cache. If the maps were more than 4GB apart, having multiple maps wouldn't work.
219   // Ensure we're below 1 GB to be safe.
220   if (max_capacity > 1 * GB) {
221     std::ostringstream oss;
222     oss << "Maxium code cache capacity is limited to 1 GB, "
223         << PrettySize(max_capacity) << " is too big";
224     *error_msg = oss.str();
225     return nullptr;
226   }
227 
228   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
229   JitMemoryRegion region;
230   if (!region.Initialize(initial_capacity,
231                          max_capacity,
232                          rwx_memory_allowed,
233                          is_zygote,
234                          error_msg)) {
235     return nullptr;
236   }
237 
238   if (region.HasCodeMapping()) {
239     const MemMap* exec_pages = region.GetExecPages();
240     runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
241   }
242 
243   std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
244   if (is_zygote) {
245     // The zygote should never collect code, so that the memory can be shared with its children.
246     jit_code_cache->garbage_collect_code_ = false;
247     jit_code_cache->shared_region_ = std::move(region);
248   } else {
249     jit_code_cache->private_region_ = std::move(region);
250   }
251 
252   VLOG(jit) << "Created jit code cache: initial capacity="
253             << PrettySize(initial_capacity)
254             << ", maximum capacity="
255             << PrettySize(max_capacity);
256 
257   return jit_code_cache.release();
258 }
259 
260 JitCodeCache::JitCodeCache()
261     : is_weak_access_enabled_(true),
262       inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
263       reserved_capacity_(GetInitialCapacity() * kReservedCapacityMultiplier),
264       zygote_map_(&shared_region_),
265       lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
266       collection_in_progress_(false),
267       garbage_collect_code_(true),
268       number_of_baseline_compilations_(0),
269       number_of_optimized_compilations_(0),
270       number_of_osr_compilations_(0),
271       number_of_collections_(0),
272       histogram_stack_map_memory_use_("Memory used for stack maps", 16),
273       histogram_code_memory_use_("Memory used for compiled code", 16),
274       histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
275 }
276 
277 JitCodeCache::~JitCodeCache() {
278   if (private_region_.HasCodeMapping()) {
279     const MemMap* exec_pages = private_region_.GetExecPages();
280     Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
281   }
282   if (shared_region_.HasCodeMapping()) {
283     const MemMap* exec_pages = shared_region_.GetExecPages();
284     Runtime::Current()->RemoveGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
285   }
286 }
287 
288 bool JitCodeCache::PrivateRegionContainsPc(const void* ptr) const {
289   return private_region_.IsInExecSpace(ptr);
290 }
291 
292 bool JitCodeCache::ContainsPc(const void* ptr) const {
293   return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
294 }
295 
296 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
297   Thread* self = Thread::Current();
298   ScopedDebugDisallowReadBarriers sddrb(self);
299   ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
300   if (UNLIKELY(method->IsNative())) {
301     auto it = jni_stubs_map_.find(JniStubKey(method));
302     if (it != jni_stubs_map_.end() &&
303         it->second.IsCompiled() &&
304         ContainsElement(it->second.GetMethods(), method)) {
305       return true;
306     }
307   } else {
308     for (const auto& it : method_code_map_) {
309       if (it.second == method) {
310         return true;
311       }
312     }
313     if (zygote_map_.ContainsMethod(method)) {
314       return true;
315     }
316   }
317   return false;
318 }
319 
320 const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
321   DCHECK(method->IsNative());
322   Thread* self = Thread::Current();
323   ScopedDebugDisallowReadBarriers sddrb(self);
324   ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
325   auto it = jni_stubs_map_.find(JniStubKey(method));
326   if (it != jni_stubs_map_.end()) {
327     JniStubData& data = it->second;
328     if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
329       return data.GetCode();
330     }
331   }
332   return nullptr;
333 }
334 
335 const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method) {
336   Thread* self = Thread::Current();
337   ScopedDebugDisallowReadBarriers sddrb(self);
338   if (method->IsPreCompiled()) {
339     const void* code_ptr = nullptr;
340     if (method->GetDeclaringClass<kWithoutReadBarrier>()->IsBootStrapClassLoaded()) {
341       code_ptr = zygote_map_.GetCodeFor(method);
342     } else {
343       WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
344       auto it = saved_compiled_methods_map_.find(method);
345       if (it != saved_compiled_methods_map_.end()) {
346         code_ptr = it->second;
347         // Now that we're using the saved entrypoint, remove it from the saved map.
348         saved_compiled_methods_map_.erase(it);
349       }
350     }
351     if (code_ptr != nullptr) {
352       OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
353       return method_header->GetEntryPoint();
354     }
355   }
356   return nullptr;
357 }
358 
359 bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
360   bool in_collection = false;
361   while (collection_in_progress_) {
362     in_collection = true;
363     lock_cond_.Wait(self);
364   }
365   return in_collection;
366 }
367 
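// As implied by the arithmetic in the two helpers below, a JIT code allocation starts with
// the OatQuickMethodHeader, padded up to the ISA code alignment, with the compiled code
// immediately after it:
//
//   allocation --> [ OatQuickMethodHeader | padding ][ code ... ]
//                                                     ^-- code pointer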
368 static uintptr_t FromCodeToAllocation(const void* code) {
369   size_t alignment = GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA);
370   return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
371 }
372 
373 static const void* FromAllocationToCode(const uint8_t* alloc) {
374   size_t alignment = GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA);
375   return reinterpret_cast<const void*>(alloc + RoundUp(sizeof(OatQuickMethodHeader), alignment));
376 }
377 
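// Layout assumed around an optimized method's `stack_map` pointer (see also GetRootTable()):
// the GC root table is emitted directly before the CodeInfo, and its length occupies the
// last 32-bit slot of the table so it can be read back from the `stack_map` pointer:
//
//   [ GcRoot<mirror::Object> x n ][ n : uint32_t ][ stack map / CodeInfo ... ]
//                                                  ^-- stack_map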
378 static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
379   // The length of the table is stored just before the stack map (and therefore at the end of
380   // the table itself), in order to be able to fetch it from a `stack_map` pointer.
381   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
382 }
383 
384 static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
385                                 bool is_shared_region)
386     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
387   if (!kIsDebugBuild) {
388     return;
389   }
390   // Put all roots in `roots_data`.
391   for (Handle<mirror::Object> object : roots) {
392     // Ensure the string is strongly interned. b/32995596
393     if (object->IsString()) {
394       ObjPtr<mirror::String> str = object->AsString();
395       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
396       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
397     }
398     // Ensure that we don't put movable objects in the shared region.
399     if (is_shared_region) {
400       CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
401     }
402   }
403 }
404 
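// Called during the GC's weak root sweeping: visits every compiled method's root table and
// updates or clears each entry (interned strings, classes, and MethodType objects), then
// walks the inline caches to drop references to classes that were not marked.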
405 void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
406   Thread* self = Thread::Current();
407   ScopedDebugDisallowReadBarriers sddrb(self);
408   {
409     ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
410     for (const auto& entry : method_code_map_) {
411       uint32_t number_of_roots = 0;
412       const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
413       uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
414           ? private_region_.GetWritableDataAddress(root_table)
415           : shared_region_.GetWritableDataAddress(root_table);
416       GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
417       for (uint32_t i = 0; i < number_of_roots; ++i) {
418         // This does not need a read barrier because this is called by GC.
419         mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
420         if (object == nullptr || object == Runtime::GetWeakClassSentinel()) {
421           // entry got deleted in a previous sweep.
422         } else if (object->IsString<kDefaultVerifyFlags>()) {
423           mirror::Object* new_object = visitor->IsMarked(object);
424           // We know the string is marked because it's a strongly-interned string that
425           // is always alive.
426           // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
427           // out of the weak access/creation pause. b/32167580
428           DCHECK_NE(new_object, nullptr) << "old-string:" << object;
429           if (new_object != object) {
430             roots[i] = GcRoot<mirror::Object>(new_object);
431           }
432         } else if (object->IsClass<kDefaultVerifyFlags>()) {
433           mirror::Object* new_klass = visitor->IsMarked(object);
434           if (new_klass == nullptr) {
435             roots[i] = GcRoot<mirror::Object>(Runtime::GetWeakClassSentinel());
436           } else if (new_klass != object) {
437             roots[i] = GcRoot<mirror::Object>(new_klass);
438           }
439         } else {
440           mirror::Object* new_method_type = visitor->IsMarked(object);
441           if (kIsDebugBuild) {
442             if (new_method_type != nullptr) {
443               // SweepSystemWeaks() is happening in the compaction pause. At that point
444               // IsMarked(object) returns the moved address, but the content is not there yet.
445               if (!Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
446                 ObjPtr<mirror::Class> method_type_class =
447                     WellKnownClasses::java_lang_invoke_MethodType.Get<kWithoutReadBarrier>();
448 
449                 CHECK_EQ((new_method_type->GetClass<kVerifyNone, kWithoutReadBarrier>()),
450                          method_type_class.Ptr());
451               }
452             }
453           }
454           if (new_method_type == nullptr) {
455             roots[i] = nullptr;
456           } else if (new_method_type != object) {
457             // References are updated in VisitRootTables. Reaching this means that the ArtMethod is no
458             // longer reachable.
459             roots[i] = GcRoot<mirror::Object>(new_method_type);
460           }
461         }
462       }
463     }
464   }
465   MutexLock mu(self, *Locks::jit_lock_);
466   // Walk over inline caches to clear entries containing unloaded classes.
467   for (const auto& [_, info] : profiling_infos_) {
468     InlineCache* caches = info->GetInlineCaches();
469     for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
470       InlineCache* cache = &caches[i];
471       for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
472         mirror::Class* klass = cache->classes_[j].Read<kWithoutReadBarrier>();
473         if (klass != nullptr) {
474           mirror::Class* new_klass = down_cast<mirror::Class*>(visitor->IsMarked(klass));
475           if (new_klass != klass) {
476             cache->classes_[j] = GcRoot<mirror::Class>(new_klass);
477           }
478         }
479       }
480     }
481   }
482 }
483 
484 void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
485   if (IsInZygoteExecSpace(code_ptr)) {
486     // No need to free, this is shared memory.
487     return;
488   }
489   uintptr_t allocation = FromCodeToAllocation(code_ptr);
490   const uint8_t* data = nullptr;
491   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
492     data = GetRootTable(code_ptr);
493   }  // else this is a JNI stub without any data.
494 
495   FreeLocked(&private_region_, reinterpret_cast<uint8_t*>(allocation), data);
496 }
497 
498 void JitCodeCache::FreeAllMethodHeaders(
499     const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
500   // We need to remove entries in method_headers from CHA dependencies
501   // first since once we do FreeCode() below, the memory can be reused
502   // so it's possible for the same method_header to start representing
503   // different compiled code.
504   {
505     MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
506     Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
507         ->RemoveDependentsWithMethodHeaders(method_headers);
508   }
509 
510   {
511     ScopedCodeCacheWrite scc(private_region_);
512     for (const OatQuickMethodHeader* method_header : method_headers) {
513       FreeCodeAndData(method_header->GetCode());
514     }
515 
516     // We have potentially removed a lot of debug info. Do maintenance pass to save space.
517     RepackNativeDebugInfoForJit();
518   }
519 
520   // Check that the set of compiled methods exactly matches native debug information.
521   // Does not check zygote methods since they can change concurrently.
522   if (kIsDebugBuild && !Runtime::Current()->IsZygote()) {
523     std::map<const void*, ArtMethod*> compiled_methods;
524     std::set<const void*> debug_info;
525     ReaderMutexLock mu2(Thread::Current(), *Locks::jit_mutator_lock_);
526     VisitAllMethods([&](const void* addr, ArtMethod* method) {
527       if (!IsInZygoteExecSpace(addr)) {
528         CHECK(addr != nullptr && method != nullptr);
529         compiled_methods.emplace(addr, method);
530       }
531     });
532     ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
533       addr = AlignDown(addr,
534                        GetInstructionSetInstructionAlignment(kRuntimeQuickCodeISA));  // Thumb-bit.
535       bool res = debug_info.emplace(addr).second;
536       CHECK(res) << "Duplicate debug info: " << addr << " " << name;
537       CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
538     });
539     if (!debug_info.empty()) {  // If debug-info generation is enabled.
540       for (const auto& [addr, method] : compiled_methods) {
541         CHECK_EQ(debug_info.count(addr), 1u) << "Missing debug info";
542       }
543       CHECK_EQ(compiled_methods.size(), debug_info.size());
544     }
545   }
546 }
547 
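// Removes all cached code, JNI stubs and profiling info for methods allocated in `alloc`,
// typically because the class loader owning that LinearAlloc is being unloaded.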
548 void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
549   ScopedTrace trace(__PRETTY_FUNCTION__);
550   ScopedDebugDisallowReadBarriers sddrb(self);
551   // We use a set to first collect all method_headers whose code needs to be
552   // removed. We need to free the underlying code after we remove CHA dependencies
553   // for entries in this set. And it's more efficient to iterate through
554   // the CHA dependency map just once with an unordered_set.
555   std::unordered_set<OatQuickMethodHeader*> method_headers;
556   MutexLock mu(self, *Locks::jit_lock_);
557   {
558     WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
559     // We do not check if a code cache GC is in progress, as this method comes
560     // with the classlinker_classes_lock_ held, and suspending ourselves could
561     // lead to a deadlock.
562     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
563       it->second.RemoveMethodsIn(alloc);
564       if (it->second.GetMethods().empty()) {
565         method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
566         it = jni_stubs_map_.erase(it);
567       } else {
568         it->first.UpdateShorty(it->second.GetMethods().front());
569         ++it;
570       }
571     }
572     for (auto it = zombie_jni_code_.begin(); it != zombie_jni_code_.end();) {
573       if (alloc.ContainsUnsafe(*it)) {
574         it = zombie_jni_code_.erase(it);
575       } else {
576         ++it;
577       }
578     }
579     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
580       if (alloc.ContainsUnsafe(it->second)) {
581         method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
582         VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
583         zombie_code_.erase(it->first);
584         processed_zombie_code_.erase(it->first);
585         method_code_map_reversed_.erase(it->second);
586         it = method_code_map_.erase(it);
587       } else {
588         ++it;
589       }
590     }
591     for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
592       DCHECK(!ContainsElement(zombie_code_, it->second));
593       if (alloc.ContainsUnsafe(it->first)) {
594         // Note that the code has already been pushed to method_headers in the loop
595         // above and is going to be removed in FreeCode() below.
596         it = osr_code_map_.erase(it);
597       } else {
598         ++it;
599       }
600     }
601   }
602 
603   for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
604     if (alloc.ContainsUnsafe(*it)) {
605       it = processed_zombie_jni_code_.erase(it);
606     } else {
607       ++it;
608     }
609   }
610 
611   for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
612     ProfilingInfo* info = it->second;
613     if (alloc.ContainsUnsafe(info->GetMethod())) {
614       private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
615       it = profiling_infos_.erase(it);
616     } else {
617       ++it;
618     }
619   }
620   FreeAllMethodHeaders(method_headers);
621 }
622 
623 bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
624   return gUseReadBarrier
625       ? self->GetWeakRefAccessEnabled()
626       : is_weak_access_enabled_.load(std::memory_order_seq_cst);
627 }
628 
629 void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
630   if (IsWeakAccessEnabled(self)) {
631     return;
632   }
633   ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
634   MutexLock mu(self, *Locks::jit_lock_);
635   while (!IsWeakAccessEnabled(self)) {
636     inline_cache_cond_.Wait(self);
637   }
638 }
639 
640 const uint8_t* JitCodeCache::GetRootTable(const void* code_ptr, uint32_t* number_of_roots) {
641   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
642   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
643   uint32_t num_roots = GetNumberOfRoots(data);
644   if (number_of_roots != nullptr) {
645     *number_of_roots = num_roots;
646   }
647   return data - ComputeRootTableSize(num_roots);
648 }
649 
650 void JitCodeCache::BroadcastForInlineCacheAccess() {
651   Thread* self = Thread::Current();
652   MutexLock mu(self, *Locks::jit_lock_);
653   inline_cache_cond_.Broadcast(self);
654 }
655 
656 void JitCodeCache::AllowInlineCacheAccess() {
657   DCHECK(!gUseReadBarrier);
658   is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
659   BroadcastForInlineCacheAccess();
660 }
661 
662 void JitCodeCache::DisallowInlineCacheAccess() {
663   DCHECK(!gUseReadBarrier);
664   is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
665 }
666 
667 void JitCodeCache::CopyInlineCacheInto(
668     const InlineCache& ic,
669     /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
670   static_assert(arraysize(ic.classes_) == InlineCache::kIndividualCacheSize);
671   DCHECK_EQ(classes->Capacity(), InlineCache::kIndividualCacheSize);
672   DCHECK_EQ(classes->Size(), 0u);
673   WaitUntilInlineCacheAccessible(Thread::Current());
674   // Note that we don't need to lock `lock_` here, the compiler calling
675   // this method has already ensured the inline cache will not be deleted.
676   for (const GcRoot<mirror::Class>& root : ic.classes_) {
677     mirror::Class* object = root.Read();
678     if (object != nullptr) {
679       DCHECK_LT(classes->Size(), classes->Capacity());
680       classes->NewHandle(object);
681     }
682   }
683 }
684 
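// Publishes a finished compilation: copies the code and its metadata (GC roots, stack maps,
// native debug info) into `region`, re-validates the CHA single-implementation assumptions
// under cha_lock_, records the new code in the appropriate map (JNI stubs, zygote map, OSR
// map or method_code_map_), and, except for OSR code and methods still awaiting their class
// initialization check, updates the method's entry point through the instrumentation.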
685 bool JitCodeCache::Commit(Thread* self,
686                           JitMemoryRegion* region,
687                           ArtMethod* method,
688                           ArrayRef<const uint8_t> reserved_code,
689                           ArrayRef<const uint8_t> code,
690                           ArrayRef<const uint8_t> reserved_data,
691                           const std::vector<Handle<mirror::Object>>& roots,
692                           ArrayRef<const uint8_t> stack_map,
693                           const std::vector<uint8_t>& debug_info,
694                           bool is_full_debug_info,
695                           CompilationKind compilation_kind,
696                           const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
697   DCHECK_IMPLIES(method->IsNative(), (compilation_kind != CompilationKind::kOsr));
698 
699   if (!method->IsNative()) {
700     // We need to do this before grabbing the lock_ because it needs to be able to see the string
701     // InternTable. Native methods do not have roots.
702     DCheckRootsAreValid(roots, IsSharedRegion(*region));
703   }
704 
705   const uint8_t* roots_data = reserved_data.data();
706   size_t root_table_size = ComputeRootTableSize(roots.size());
707   const uint8_t* stack_map_data = roots_data + root_table_size;
708 
709   OatQuickMethodHeader* method_header = nullptr;
710   {
711     MutexLock mu(self, *Locks::jit_lock_);
712     const uint8_t* code_ptr = region->CommitCode(reserved_code, code, stack_map_data);
713     if (code_ptr == nullptr) {
714       return false;
715     }
716     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
717 
718     // Commit roots and stack maps before updating the entry point.
719     if (!region->CommitData(reserved_data, roots, stack_map)) {
720       return false;
721     }
722 
723     switch (compilation_kind) {
724       case CompilationKind::kOsr:
725         number_of_osr_compilations_++;
726         break;
727       case CompilationKind::kBaseline:
728         number_of_baseline_compilations_++;
729         break;
730       case CompilationKind::kOptimized:
731         number_of_optimized_compilations_++;
732         break;
733     }
734 
735     // We need to update the debug info before the entry point gets set.
736     // At the same time we want to do it under the JIT lock so that debug info and JIT maps are in sync.
737     if (!debug_info.empty()) {
738       // NB: Don't allow packing of full info since it would remove non-backtrace data.
739       AddNativeDebugInfoForJit(code_ptr, debug_info, /*allow_packing=*/ !is_full_debug_info);
740     }
741 
742     // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
743     // compiled code is considered invalidated by some class linking, but below we still make the
744     // compiled code valid for the method.  Need cha_lock_ for checking all single-implementation
745     // flags and register dependencies.
746     {
747       ScopedDebugDisallowReadBarriers sddrb(self);
748       MutexLock cha_mu(self, *Locks::cha_lock_);
749       bool single_impl_still_valid = true;
750       for (ArtMethod* single_impl : cha_single_implementation_list) {
751         if (!single_impl->HasSingleImplementation()) {
752           // Simply discard the compiled code.
753           // Hopefully the class hierarchy will be more stable when compilation is retried.
754           single_impl_still_valid = false;
755           break;
756         }
757       }
758 
759       // Discard the code if any single-implementation assumptions are now invalid.
760       if (UNLIKELY(!single_impl_still_valid)) {
761         VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
762         return false;
763       }
764       DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
765           << "Should not be using cha on debuggable apps/runs!";
766 
767       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
768       for (ArtMethod* single_impl : cha_single_implementation_list) {
769         class_linker->GetClassHierarchyAnalysis()->AddDependency(
770             single_impl, method, method_header);
771       }
772     }
773 
774     if (UNLIKELY(method->IsNative())) {
775       ScopedDebugDisallowReadBarriers sddrb(self);
776       WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
777       auto it = jni_stubs_map_.find(JniStubKey(method));
778       DCHECK(it != jni_stubs_map_.end())
779           << "Entry inserted in NotifyCompilationOf() should be alive.";
780       JniStubData* data = &it->second;
781       DCHECK(ContainsElement(data->GetMethods(), method))
782           << "Entry inserted in NotifyCompilationOf() should contain this method.";
783       data->SetCode(code_ptr);
784       data->UpdateEntryPoints(method_header->GetEntryPoint());
785     } else {
786       if (method->IsPreCompiled() && IsSharedRegion(*region)) {
787         ScopedDebugDisallowReadBarriers sddrb(self);
788         zygote_map_.Put(code_ptr, method);
789       } else {
790         ScopedDebugDisallowReadBarriers sddrb(self);
791         WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
792         method_code_map_.Put(code_ptr, method);
793 
794         // Search for MethodTypes among the roots. They need to be treated as strongly reachable while
795         // the corresponding ArtMethod is not removed.
796         ObjPtr<mirror::Class> method_type_class =
797             WellKnownClasses::java_lang_invoke_MethodType.Get<kWithoutReadBarrier>();
798 
799         for (const Handle<mirror::Object>& root : roots) {
800           ObjPtr<mirror::Class> klass = root->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>();
801           if (klass == method_type_class ||
802               klass == ReadBarrier::IsMarked(method_type_class.Ptr()) ||
803               ReadBarrier::IsMarked(klass.Ptr()) == method_type_class) {
804             auto it = method_code_map_reversed_.FindOrAdd(method, std::vector<const void*>());
805             std::vector<const void*>& code_ptrs = it->second;
806 
807             DCHECK(std::find(code_ptrs.begin(), code_ptrs.end(), code_ptr) == code_ptrs.end());
808             it->second.emplace_back(code_ptr);
809 
810             // `MethodType`s are strong GC roots and need write barrier.
811             WriteBarrier::ForEveryFieldWrite(method->GetDeclaringClass<kWithoutReadBarrier>());
812             break;
813           }
814         }
815       }
816       if (compilation_kind == CompilationKind::kOsr) {
817         ScopedDebugDisallowReadBarriers sddrb(self);
818         WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
819         osr_code_map_.Put(method, code_ptr);
820       } else if (method->StillNeedsClinitCheck()) {
821         ScopedDebugDisallowReadBarriers sddrb(self);
822         // This situation currently only occurs in the jit-zygote mode.
823         DCHECK(!garbage_collect_code_);
824         DCHECK(method->IsPreCompiled());
825         // The shared region can easily be queried. For the private region, we
826         // use a side map.
827         if (!IsSharedRegion(*region)) {
828           WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
829           saved_compiled_methods_map_.Put(method, code_ptr);
830         }
831       } else {
832         Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
833             method, method_header->GetEntryPoint());
834       }
835     }
836     VLOG(jit)
837         << "JIT added (kind=" << compilation_kind << ") "
838         << ArtMethod::PrettyMethod(method) << "@" << method
839         << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
840         << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
841         << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
842         << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
843                                          method_header->GetCodeSize());
844   }
845 
846   if (kIsDebugBuild) {
847     uintptr_t entry_point = reinterpret_cast<uintptr_t>(method_header->GetEntryPoint());
848     DCHECK_EQ(LookupMethodHeader(entry_point, method), method_header) << method->PrettyMethod();
849     DCHECK_EQ(LookupMethodHeader(entry_point + method_header->GetCodeSize() - 1, method),
850               method_header) << method->PrettyMethod();
851   }
852   return true;
853 }
854 
855 size_t JitCodeCache::CodeCacheSize() {
856   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
857   return CodeCacheSizeLocked();
858 }
859 
860 bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
861   // This function is used only for testing and only with non-native methods.
862   CHECK(!method->IsNative());
863 
864   Thread* self = Thread::Current();
865   ScopedDebugDisallowReadBarriers sddrb(self);
866   MutexLock mu(self, *Locks::jit_lock_);
867 
868   bool in_cache = RemoveMethodLocked(method, release_memory);
869 
870   if (!in_cache) {
871     return false;
872   }
873 
874   Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
875   return true;
876 }
877 
878 bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
879   if (LIKELY(!method->IsNative())) {
880     auto it = profiling_infos_.find(method);
881     if (it != profiling_infos_.end()) {
882       profiling_infos_.erase(it);
883     }
884   }
885 
886   bool in_cache = false;
887   ScopedCodeCacheWrite ccw(private_region_);
888   WriterMutexLock mu(Thread::Current(), *Locks::jit_mutator_lock_);
889   if (UNLIKELY(method->IsNative())) {
890     auto it = jni_stubs_map_.find(JniStubKey(method));
891     if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
892       in_cache = true;
893       if (it->second.GetMethods().empty()) {
894         if (release_memory) {
895           FreeCodeAndData(it->second.GetCode());
896         }
897         jni_stubs_map_.erase(it);
898       } else {
899         it->first.UpdateShorty(it->second.GetMethods().front());
900       }
901       zombie_jni_code_.erase(method);
902       processed_zombie_jni_code_.erase(method);
903     }
904   } else {
905     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
906       if (it->second == method) {
907         in_cache = true;
908         if (release_memory) {
909           FreeCodeAndData(it->first);
910         }
911         VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
912         it = method_code_map_.erase(it);
913       } else {
914         ++it;
915       }
916     }
917     method_code_map_reversed_.erase(method);
918 
919     auto osr_it = osr_code_map_.find(method);
920     if (osr_it != osr_code_map_.end()) {
921       osr_code_map_.erase(osr_it);
922     }
923   }
924 
925   return in_cache;
926 }
927 
928 // This notifies the code cache that the given method has been redefined and that it should remove
929 // any cached information it has on the method. All threads must be suspended before calling this
930 // method. The compiled code for the method (if there is any) must not be on any thread's call stack.
931 void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
932   Thread* self = Thread::Current();
933   ScopedDebugDisallowReadBarriers sddrb(self);
934   MutexLock mu(self, *Locks::jit_lock_);
935   RemoveMethodLocked(method, /* release_memory= */ true);
936 }
937 
938 // This invalidates old_method. Once this function returns one can no longer use old_method to
939 // execute code unless it is fixed up. This fixup will happen later in the process of installing a
940 // class redefinition.
941 // TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
942 // shouldn't be used since it is no longer logically in the jit code cache.
943 // TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
944 void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
945   Thread* self = Thread::Current();
946   ScopedDebugDisallowReadBarriers sddrb(self);
947   WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
948   if (old_method->IsNative()) {
949     // Update methods in jni_stubs_map_.
950     for (auto& entry : jni_stubs_map_) {
951       JniStubData& data = entry.second;
952       data.MoveObsoleteMethod(old_method, new_method);
953     }
954     return;
955   }
956 
957   // Update method_code_map_ to point to the new method.
958   for (auto& it : method_code_map_) {
959     if (it.second == old_method) {
960       it.second = new_method;
961     }
962   }
963   // Update osr_code_map_ to point to the new method.
964   auto code_map = osr_code_map_.find(old_method);
965   if (code_map != osr_code_map_.end()) {
966     osr_code_map_.Put(new_method, code_map->second);
967     osr_code_map_.erase(old_method);
968   }
969 
970   auto node = method_code_map_reversed_.extract(old_method);
971   if (!node.empty()) {
972     node.key() = new_method;
973     method_code_map_reversed_.insert(std::move(node));
974   }
975 }
976 
977 void JitCodeCache::TransitionToDebuggable() {
978   // Check that none of our methods have an entrypoint in the zygote exec
979   // space (this should be taken care of by
980   // ClassLinker::UpdateEntryPointsClassVisitor).
981   Thread* self = Thread::Current();
982   ScopedDebugDisallowReadBarriers sddrb(self);
983   if (kIsDebugBuild) {
984     // TODO: Check `jni_stubs_map_`?
985     ReaderMutexLock mu2(self, *Locks::jit_mutator_lock_);
986     for (const auto& entry : method_code_map_) {
987       ArtMethod* method = entry.second;
988       DCHECK(!method->IsPreCompiled());
989       DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
990     }
991   }
992   {
993     WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
994     // Not strictly necessary, but this map is useless now.
995     saved_compiled_methods_map_.clear();
996   }
997   if (kIsDebugBuild) {
998     for (const auto& entry : zygote_map_) {
999       ArtMethod* method = entry.method;
1000       if (method != nullptr) {
1001         DCHECK(!method->IsPreCompiled());
1002         DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
1003       }
1004     }
1005   }
1006 }
1007 
1008 size_t JitCodeCache::CodeCacheSizeLocked() {
1009   return GetCurrentRegion()->GetUsedMemoryForCode();
1010 }
1011 
1012 size_t JitCodeCache::DataCacheSize() {
1013   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1014   return DataCacheSizeLocked();
1015 }
1016 
1017 size_t JitCodeCache::DataCacheSizeLocked() {
1018   return GetCurrentRegion()->GetUsedMemoryForData();
1019 }
1020 
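// Reserves memory for a compilation before code generation: the code allocation includes
// room for the OatQuickMethodHeader, and the data allocation holds the GC root table
// followed by the stack maps. On failure the cache capacity is increased and the
// allocation retried until it succeeds or the region is already at its maximum capacity.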
1021 bool JitCodeCache::Reserve(Thread* self,
1022                            JitMemoryRegion* region,
1023                            size_t code_size,
1024                            size_t stack_map_size,
1025                            size_t number_of_roots,
1026                            ArtMethod* method,
1027                            /*out*/ArrayRef<const uint8_t>* reserved_code,
1028                            /*out*/ArrayRef<const uint8_t>* reserved_data) {
1029   code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
1030   size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));
1031 
1032   const uint8_t* code;
1033   const uint8_t* data;
1034   while (true) {
1035     bool at_max_capacity = false;
1036     {
1037       ScopedThreadSuspension sts(self, ThreadState::kSuspended);
1038       MutexLock mu(self, *Locks::jit_lock_);
1039       ScopedCodeCacheWrite ccw(*region);
1040       code = region->AllocateCode(code_size);
1041       data = region->AllocateData(data_size);
1042       at_max_capacity = IsAtMaxCapacity();
1043     }
1044     if (code != nullptr && data != nullptr) {
1045       break;
1046     }
1047     Free(self, region, code, data);
1048     if (at_max_capacity) {
1049       VLOG(jit) << "JIT failed to allocate code of size "
1050                 << PrettySize(code_size)
1051                 << ", and data of size "
1052                 << PrettySize(data_size);
1053       return false;
1054     }
1055     // Increase the capacity and try again.
1056     IncreaseCodeCacheCapacity(self);
1057   }
1058 
1059   *reserved_code = ArrayRef<const uint8_t>(code, code_size);
1060   *reserved_data = ArrayRef<const uint8_t>(data, data_size);
1061 
1062   MutexLock mu(self, *Locks::jit_lock_);
1063   histogram_code_memory_use_.AddValue(code_size);
1064   if (code_size > kCodeSizeLogThreshold) {
1065     LOG(INFO) << "JIT allocated "
1066               << PrettySize(code_size)
1067               << " for compiled code of "
1068               << ArtMethod::PrettyMethod(method);
1069   }
1070   histogram_stack_map_memory_use_.AddValue(data_size);
1071   if (data_size > kStackMapSizeLogThreshold) {
1072     LOG(INFO) << "JIT allocated "
1073               << PrettySize(data_size)
1074               << " for stack maps of "
1075               << ArtMethod::PrettyMethod(method);
1076   }
1077   return true;
1078 }
1079 
1080 void JitCodeCache::Free(Thread* self,
1081                         JitMemoryRegion* region,
1082                         const uint8_t* code,
1083                         const uint8_t* data) {
1084   MutexLock mu(self, *Locks::jit_lock_);
1085   ScopedCodeCacheWrite ccw(*region);
1086   FreeLocked(region, code, data);
1087 }
1088 
1089 void JitCodeCache::FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data) {
1090   if (code != nullptr) {
1091     RemoveNativeDebugInfoForJit(reinterpret_cast<const void*>(FromAllocationToCode(code)));
1092     region->FreeCode(code);
1093   }
1094   if (data != nullptr) {
1095     region->FreeData(data);
1096   }
1097 }
1098 
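// Checkpoint closure run on every thread by MarkCompiledCodeOnThreadStacks(): walks the
// thread's stack and marks in the live bitmap any JIT code it is currently executing, so
// that RemoveUnmarkedCode() does not free code that is still on a stack.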
1099 class MarkCodeClosure final : public Closure {
1100  public:
1101   MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
1102       : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {}
1103 
1104   void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
1105     ScopedTrace trace(__PRETTY_FUNCTION__);
1106     DCHECK(thread == Thread::Current() || thread->IsSuspended());
1107     StackVisitor::WalkStack(
1108         [&](const art::StackVisitor* stack_visitor) {
1109           const OatQuickMethodHeader* method_header =
1110               stack_visitor->GetCurrentOatQuickMethodHeader();
1111           if (method_header == nullptr) {
1112             return true;
1113           }
1114           const void* code = method_header->GetCode();
1115           if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
1116             // Use the atomic set version, as multiple threads are executing this code.
1117             bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
1118           }
1119           return true;
1120         },
1121         thread,
1122         /* context= */ nullptr,
1123         art::StackVisitor::StackWalkKind::kSkipInlinedFrames);
1124 
1125     barrier_->Pass(Thread::Current());
1126   }
1127 
1128  private:
1129   JitCodeCache* const code_cache_;
1130   CodeCacheBitmap* const bitmap_;
1131   Barrier* const barrier_;
1132 };
1133 
1134 void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
1135   Barrier barrier(0);
1136   size_t threads_running_checkpoint = 0;
1137   MarkCodeClosure closure(this, GetLiveBitmap(), &barrier);
1138   threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1139   // Now that we have run our checkpoint, move to a suspended state and wait
1140   // for other threads to run the checkpoint.
1141   ScopedThreadSuspension sts(self, ThreadState::kSuspended);
1142   if (threads_running_checkpoint != 0) {
1143     barrier.Increment(self, threads_running_checkpoint);
1144   }
1145 }
1146 
1147 bool JitCodeCache::IsAtMaxCapacity() const {
1148   return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
1149 }
1150 
1151 void JitCodeCache::IncreaseCodeCacheCapacity(Thread* self) {
1152   ScopedThreadSuspension sts(self, ThreadState::kSuspended);
1153   MutexLock mu(self, *Locks::jit_lock_);
1154   // Wait for a potential collection, as the size of the bitmap used by that collection
1155   // is of the current capacity.
1156   WaitForPotentialCollectionToComplete(self);
1157   private_region_.IncreaseCodeCacheCapacity();
1158 }
1159 
1160 void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
1161   ScopedTrace trace(__FUNCTION__);
1162   std::unordered_set<OatQuickMethodHeader*> method_headers;
1163   ScopedDebugDisallowReadBarriers sddrb(self);
1164   MutexLock mu(self, *Locks::jit_lock_);
1165   // Iterate over all zombie code and remove entries that are not marked.
1166   for (auto it = processed_zombie_code_.begin(); it != processed_zombie_code_.end();) {
1167     const void* code_ptr = *it;
1168     uintptr_t allocation = FromCodeToAllocation(code_ptr);
1169     DCHECK(!IsInZygoteExecSpace(code_ptr));
1170     if (GetLiveBitmap()->Test(allocation)) {
1171       ++it;
1172     } else {
1173       OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1174       method_headers.insert(header);
1175       {
1176         WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
1177         auto method_it = method_code_map_.find(header->GetCode());
1178 
1179         if (method_it != method_code_map_.end()) {
1180           ArtMethod* method = method_it->second;
1181           auto code_ptrs_it = method_code_map_reversed_.find(method);
1182 
1183           if (code_ptrs_it != method_code_map_reversed_.end()) {
1184             std::vector<const void*>& code_ptrs = code_ptrs_it->second;
1185             RemoveElement(code_ptrs, code_ptr);
1186 
1187             if (code_ptrs.empty()) {
1188               method_code_map_reversed_.erase(code_ptrs_it);
1189             }
1190           }
1191         }
1192 
1193         method_code_map_.erase(header->GetCode());
1194       }
1195       VLOG(jit) << "JIT removed " << *it;
1196       it = processed_zombie_code_.erase(it);
1197     }
1198   }
1199   for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
1200     WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
1201     ArtMethod* method = *it;
1202     auto stub = jni_stubs_map_.find(JniStubKey(method));
1203     DCHECK(stub != jni_stubs_map_.end()) << method->PrettyMethod();
1204     JniStubData& data = stub->second;
1205     DCHECK(data.IsCompiled());
1206     DCHECK(ContainsElement(data.GetMethods(), method));
1207     if (!GetLiveBitmap()->Test(FromCodeToAllocation(data.GetCode()))) {
1208       data.RemoveMethod(method);
1209       if (data.GetMethods().empty()) {
1210         OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
1211         method_headers.insert(header);
1212         CHECK(ContainsPc(header));
1213         VLOG(jit) << "JIT removed native code of" << method->PrettyMethod();
1214         jni_stubs_map_.erase(stub);
1215       } else {
1216         stub->first.UpdateShorty(stub->second.GetMethods().front());
1217       }
1218       it = processed_zombie_jni_code_.erase(it);
1219     } else {
1220       ++it;
1221     }
1222   }
1223   FreeAllMethodHeaders(method_headers);
1224 }
1225 
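// Task posted to the JIT thread pool once enough zombie code has accumulated
// (see AddZombieCodeInternal()); it runs the code cache collection asynchronously.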
1226 class JitGcTask final : public Task {
1227  public:
1228   JitGcTask() {}
1229 
1230   void Run(Thread* self) override {
1231     Runtime::Current()->GetJit()->GetCodeCache()->DoCollection(self);
1232   }
1233 
1234   void Finalize() override {
1235     delete this;
1236   }
1237 };
1238 
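// Records that `entry_point` no longer serves as the entry point of `method` (the code has
// been superseded or invalidated). The code is not freed immediately: it is queued as
// "zombie" code and reclaimed by a later collection, once it is known that no thread is
// still executing it (see MarkCompiledCodeOnThreadStacks() and RemoveUnmarkedCode()).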
1239 void JitCodeCache::AddZombieCode(ArtMethod* method, const void* entry_point) {
1240   CHECK(ContainsPc(entry_point));
1241   CHECK(method->IsNative() || (method->GetEntryPointFromQuickCompiledCode() != entry_point));
1242   const void* code_ptr = OatQuickMethodHeader::FromEntryPoint(entry_point)->GetCode();
1243   if (!IsInZygoteExecSpace(code_ptr)) {
1244     Thread* self = Thread::Current();
1245     if (Locks::jit_mutator_lock_->IsExclusiveHeld(self)) {
1246       AddZombieCodeInternal(method, code_ptr);
1247     } else {
1248       WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
1249       AddZombieCodeInternal(method, code_ptr);
1250     }
1251   }
1252 }
1253 
1254 
1255 void JitCodeCache::AddZombieCodeInternal(ArtMethod* method, const void* code_ptr) {
1256   if (method->IsNative()) {
1257     if (kIsDebugBuild) {
1258       auto it = jni_stubs_map_.find(JniStubKey(method));
1259       CHECK(it != jni_stubs_map_.end()) << method->PrettyMethod();
1260       CHECK(it->second.IsCompiled()) << method->PrettyMethod();
1261       CHECK_EQ(it->second.GetCode(), code_ptr) << method->PrettyMethod();
1262       CHECK(ContainsElement(it->second.GetMethods(), method)) << method->PrettyMethod();
1263     }
1264     zombie_jni_code_.insert(method);
1265   } else {
1266     CHECK(!ContainsElement(zombie_code_, code_ptr));
1267     zombie_code_.insert(code_ptr);
1268   }
1269 
1270   // Arbitrary threshold on the amount of zombie code accumulated before scheduling a collection.
1271   static constexpr size_t kNumberOfZombieCodeThreshold = kIsDebugBuild ? 1 : 1000;
1272   size_t number_of_code_to_delete =
1273       zombie_code_.size() + zombie_jni_code_.size() + osr_code_map_.size();
1274   if (number_of_code_to_delete >= kNumberOfZombieCodeThreshold) {
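         // Schedule at most one pending collection task: only the thread that flips
         // `gc_task_scheduled_` from false to true enqueues a new JitGcTask.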
1275     JitThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
1276     if (pool != nullptr && !std::atomic_exchange_explicit(&gc_task_scheduled_,
1277                                                           true,
1278                                                           std::memory_order_relaxed)) {
1279       pool->AddTask(Thread::Current(), new JitGcTask());
1280     }
1281   }
1282 }
1283 
1284 bool JitCodeCache::GetGarbageCollectCode() {
1285   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1286   return garbage_collect_code_;
1287 }
1288 
1289 void JitCodeCache::SetGarbageCollectCode(bool value) {
1290   Thread* self = Thread::Current();
1291   MutexLock mu(self, *Locks::jit_lock_);
1292   // Update the flag while holding the lock to ensure no thread will try to GC.
1293   garbage_collect_code_ = value;
1294 }
1295 
1296 ProfilingInfo* JitCodeCache::GetProfilingInfo(ArtMethod* method, Thread* self) {
1297   ScopedDebugDisallowReadBarriers sddrb(self);
1298   MutexLock mu(self, *Locks::jit_lock_);
1299   auto it = profiling_infos_.find(method);
1300   if (it == profiling_infos_.end()) {
1301     return nullptr;
1302   }
1303   return it->second;
1304 }
1305 
1306 void JitCodeCache::MaybeUpdateInlineCache(ArtMethod* method,
1307                                           uint32_t dex_pc,
1308                                           ObjPtr<mirror::Class> cls,
1309                                           Thread* self) {
1310   ScopedDebugDisallowReadBarriers sddrb(self);
1311   MutexLock mu(self, *Locks::jit_lock_);
1312   auto it = profiling_infos_.find(method);
1313   if (it == profiling_infos_.end()) {
1314     return;
1315   }
1316   ProfilingInfo* info = it->second;
1317   ScopedAssertNoThreadSuspension sants("ProfilingInfo");
1318   info->AddInvokeInfo(dex_pc, cls.Ptr());
1319 }
1320 
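     // Perform a code cache collection: move the pending zombie sets to their "processed"
     // counterparts, run a checkpoint that marks compiled code still on thread stacks in
     // `live_bitmap_`, then free the zombie code that was not marked.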
1321 void JitCodeCache::DoCollection(Thread* self) {
1322   ScopedTrace trace(__FUNCTION__);
1323 
1324   {
1325     ScopedDebugDisallowReadBarriers sddrb(self);
1326     MutexLock mu(self, *Locks::jit_lock_);
1327     if (!garbage_collect_code_) {
1328       return;
1329     } else if (WaitForPotentialCollectionToComplete(self)) {
1330       return;
1331     }
1332     collection_in_progress_ = true;
1333     number_of_collections_++;
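         // Create the live bitmap covering the private region's executable pages; the
         // checkpoint below marks the code entries found on thread stacks so
         // RemoveUnmarkedCode() can skip them.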
1334     live_bitmap_.reset(CodeCacheBitmap::Create(
1335           "code-cache-bitmap",
1336           reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
1337           reinterpret_cast<uintptr_t>(
1338               private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
1339     {
1340       WriterMutexLock mu2(self, *Locks::jit_mutator_lock_);
1341       processed_zombie_code_.insert(zombie_code_.begin(), zombie_code_.end());
1342       zombie_code_.clear();
1343       processed_zombie_jni_code_.insert(zombie_jni_code_.begin(), zombie_jni_code_.end());
1344       zombie_jni_code_.clear();
1345       // Empty the OSR method map: OSR-compiled code will be deleted, except for code
1346       // still running on thread stacks.
1347       for (auto it = osr_code_map_.begin(); it != osr_code_map_.end(); ++it) {
1348         processed_zombie_code_.insert(it->second);
1349       }
1350       osr_code_map_.clear();
1351     }
1352   }
1353   TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
1354   {
1355     TimingLogger::ScopedTiming st("Code cache collection", &logger);
1356 
1357     {
1358       ScopedObjectAccess soa(self);
1359       // Run a checkpoint on all threads to mark the JIT compiled code they are running.
1360       MarkCompiledCodeOnThreadStacks(self);
1361 
1362       // Remove zombie code which hasn't been marked.
1363       RemoveUnmarkedCode(self);
1364     }
1365 
1366     gc_task_scheduled_ = false;
1367     MutexLock mu(self, *Locks::jit_lock_);
1368     live_bitmap_.reset(nullptr);
1369     collection_in_progress_ = false;
1370     lock_cond_.Broadcast(self);
1371   }
1372 
1373   Runtime::Current()->GetJit()->AddTimingLogger(logger);
1374 }
1375 
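     // Map a PC that lies inside the code cache back to the OatQuickMethodHeader of the
     // JIT-compiled code containing it. `method` may only be null in debug builds, in
     // which case the slow path below also scans the compiled JNI stubs.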
1376 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
1377   static_assert(kRuntimeQuickCodeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
1378   const void* pc_ptr = reinterpret_cast<const void*>(pc);
1379   if (!ContainsPc(pc_ptr)) {
1380     return nullptr;
1381   }
1382 
1383   if (!kIsDebugBuild) {
1384     // Called with null `method` only from MarkCodeClosure::Run() in debug build.
1385     CHECK(method != nullptr);
1386   }
1387 
1388   Thread* self = Thread::Current();
1389   ScopedDebugDisallowReadBarriers sddrb(self);
1390   OatQuickMethodHeader* method_header = nullptr;
1391   ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
1392   if (method != nullptr && UNLIKELY(method->IsNative())) {
1393     ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
1394     auto it = jni_stubs_map_.find(JniStubKey(method));
1395     if (it == jni_stubs_map_.end()) {
1396       return nullptr;
1397     }
1398     if (!ContainsElement(it->second.GetMethods(), method)) {
1399       DCHECK(!OatQuickMethodHeader::FromCodePointer(it->second.GetCode())->Contains(pc))
1400           << "Method missing from stub map, but pc executing the method points to the stub."
1401           << " method= " << method->PrettyMethod()
1402           << " pc= " << std::hex << pc;
1403       return nullptr;
1404     }
1405     const void* code_ptr = it->second.GetCode();
1406     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1407     if (!method_header->Contains(pc)) {
1408       return nullptr;
1409     }
1410   } else {
1411     if (shared_region_.IsInExecSpace(pc_ptr)) {
1412       const void* code_ptr = zygote_map_.GetCodeFor(method, pc);
1413       if (code_ptr != nullptr) {
1414         return OatQuickMethodHeader::FromCodePointer(code_ptr);
1415       }
1416     }
1417     {
1418       ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
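           // `method_code_map_` is keyed by code pointer; find the greatest key that is
           // less than or equal to `pc_ptr`, then check whether its code range contains `pc`.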
1419       auto it = method_code_map_.lower_bound(pc_ptr);
1420       if ((it == method_code_map_.end() || it->first != pc_ptr) &&
1421           it != method_code_map_.begin()) {
1422         --it;
1423       }
1424       if (it != method_code_map_.end()) {
1425         const void* code_ptr = it->first;
1426         if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
1427           method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1428           found_method = it->second;
1429         }
1430       }
1431     }
1432     if (method_header == nullptr && method == nullptr) {
1433       ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
1434       // Scan all compiled JNI stubs as well. This slow search is used only for checks
1435       // in debug builds; in release builds `method` is never null.
1436       for (auto&& entry : jni_stubs_map_) {
1437         const JniStubData& data = entry.second;
1438         if (data.IsCompiled() &&
1439             OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
1440           method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
1441         }
1442       }
1443     }
1444     if (method_header == nullptr) {
1445       return nullptr;
1446     }
1447   }
1448 
1449   if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
1450     DCHECK_EQ(found_method, method)
1451         << ArtMethod::PrettyMethod(method) << " "
1452         << ArtMethod::PrettyMethod(found_method) << " "
1453         << std::hex << pc;
1454   }
1455   return method_header;
1456 }
1457 
1458 OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
1459   Thread* self = Thread::Current();
1460   ScopedDebugDisallowReadBarriers sddrb(self);
1461   ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
1462   auto it = osr_code_map_.find(method);
1463   if (it == osr_code_map_.end()) {
1464     return nullptr;
1465   }
1466   return OatQuickMethodHeader::FromCodePointer(it->second);
1467 }
1468 
1469 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
1470                                               ArtMethod* method,
1471                                               const std::vector<uint32_t>& inline_cache_entries,
1472                                               const std::vector<uint32_t>& branch_cache_entries) {
1473   DCHECK(CanAllocateProfilingInfo());
1474   ProfilingInfo* info = nullptr;
1475   {
1476     MutexLock mu(self, *Locks::jit_lock_);
1477     info = AddProfilingInfoInternal(self, method, inline_cache_entries, branch_cache_entries);
1478   }
1479 
1480   if (info == nullptr) {
1481     IncreaseCodeCacheCapacity(self);
1482     MutexLock mu(self, *Locks::jit_lock_);
1483     info = AddProfilingInfoInternal(self, method, inline_cache_entries, branch_cache_entries);
1484   }
1485   return info;
1486 }
1487 
1488 ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(
1489     Thread* self,
1490     ArtMethod* method,
1491     const std::vector<uint32_t>& inline_cache_entries,
1492     const std::vector<uint32_t>& branch_cache_entries) {
1493   ScopedDebugDisallowReadBarriers sddrb(self);
1494   // Check whether some other thread has concurrently created it.
1495   auto it = profiling_infos_.find(method);
1496   if (it != profiling_infos_.end()) {
1497     return it->second;
1498   }
1499 
1500   size_t profile_info_size =
1501       ProfilingInfo::ComputeSize(inline_cache_entries.size(), branch_cache_entries.size());
1502 
1503   const uint8_t* data = private_region_.AllocateData(profile_info_size);
1504   if (data == nullptr) {
1505     return nullptr;
1506   }
1507   uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
1508   ProfilingInfo* info =
1509       new (writable_data) ProfilingInfo(method, inline_cache_entries, branch_cache_entries);
1510 
1511   profiling_infos_.Put(method, info);
1512   histogram_profiling_info_memory_use_.AddValue(profile_info_size);
1513   return info;
1514 }
1515 
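     // Grow the dlmalloc mspace backing one of the code cache regions; the request is
     // forwarded to whichever region owns the given `mspace`.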
1516 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
1517   return shared_region_.OwnsSpace(mspace)
1518       ? shared_region_.MoreCore(mspace, increment)
1519       : private_region_.MoreCore(mspace, increment);
1520 }
1521 
1522 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
1523                                       std::vector<ProfileMethodInfo>& methods,
1524                                       uint16_t inline_cache_threshold) {
1525   ScopedTrace trace(__FUNCTION__);
1526   Thread* self = Thread::Current();
1527 
1528   // Preserve class loaders to prevent unloading while we're processing
1529   // ArtMethods.
1530   VariableSizedHandleScope handles(self);
1531   Runtime::Current()->GetClassLinker()->GetClassLoaders(self, &handles);
1532 
1533   // Wait for any GC to be complete, to prevent looking at ArtMethods whose
1534   // class loader is being deleted. Since we remain runnable, another new GC
1535   // can't get far.
1536   Runtime::Current()->GetHeap()->WaitForGcToComplete(gc::kGcCauseProfileSaver, self);
1537 
1538   // We'll be looking at inline caches, so ensure they are accessible.
1539   WaitUntilInlineCacheAccessible(self);
1540 
1541   SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos;
1542   std::vector<ArtMethod*> copies;
1543   {
1544     MutexLock mu(self, *Locks::jit_lock_);
1545     profiling_infos = profiling_infos_;
1546     ReaderMutexLock mu2(self, *Locks::jit_mutator_lock_);
1547     for (const auto& entry : method_code_map_) {
1548       copies.push_back(entry.second);
1549     }
1550   }
1551   for (ArtMethod* method : copies) {
1552     auto it = profiling_infos.find(method);
1553     ProfilingInfo* info = (it == profiling_infos.end()) ? nullptr : it->second;
1554     const DexFile* dex_file = method->GetDexFile();
1555     const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
1556     if (!ContainsElement(dex_base_locations, base_location)) {
1557       // Skip dex files which are not profiled.
1558       continue;
1559     }
1560     std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
1561 
1562     if (info != nullptr) {
1563       // If the method is still baseline compiled and doesn't meet the inline cache threshold, don't
1564       // save the inline caches because they might be incomplete.
1565       // Although we don't deoptimize for incomplete inline caches in AOT-compiled code, inlining
1566       // leads to larger generated code.
1567       // If the inline cache is empty the compiler will generate a regular invoke virtual/interface.
1568       const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
1569       if (ContainsPc(entry_point) &&
1570           CodeInfo::IsBaseline(
1571               OatQuickMethodHeader::FromEntryPoint(entry_point)->GetOptimizedCodeInfoPtr()) &&
1572           (ProfilingInfo::GetOptimizeThreshold() - info->GetBaselineHotnessCount()) <
1573               inline_cache_threshold) {
1574         methods.emplace_back(/*ProfileMethodInfo*/
1575             MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1576         continue;
1577       }
1578 
1579       for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
1580         std::vector<TypeReference> profile_classes;
1581         const InlineCache& cache = info->GetInlineCaches()[i];
1582         ArtMethod* caller = info->GetMethod();
1583         bool is_missing_types = false;
1584         for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
1585           mirror::Class* cls = cache.classes_[k].Read();
1586           if (cls == nullptr) {
1587             break;
1588           }
1589 
1590           // Check if the receiver is in the boot class path or if it's in the
1591           // same class loader as the caller. If not, skip it, as there is not
1592           // much we can do during AOT.
1593           if (!cls->IsBootStrapClassLoaded() &&
1594               caller->GetClassLoader() != cls->GetClassLoader()) {
1595             is_missing_types = true;
1596             continue;
1597           }
1598 
1599           const DexFile* class_dex_file = nullptr;
1600           dex::TypeIndex type_index;
1601 
1602           if (cls->GetDexCache() == nullptr) {
1603             DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
1604             // Make a best effort to find the type index in the method's dex file.
1605             // We could search all open dex files but that might turn expensive
1606             // and probably not worth it.
1607             class_dex_file = dex_file;
1608             type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
1609           } else {
1610             class_dex_file = &(cls->GetDexFile());
1611             type_index = cls->GetDexTypeIndex();
1612           }
1613           if (!type_index.IsValid()) {
1614             // Could be a proxy class or an array for which we couldn't find the type index.
1615             is_missing_types = true;
1616             continue;
1617           }
1618           if (ContainsElement(dex_base_locations,
1619                               DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
1620             // Only consider classes from the same apk (including multidex).
1621             profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
1622                 class_dex_file, type_index);
1623           } else {
1624             is_missing_types = true;
1625           }
1626         }
1627         if (!profile_classes.empty()) {
1628           inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
1629               cache.dex_pc_, is_missing_types, profile_classes);
1630         }
1631       }
1632     }
1633     methods.emplace_back(/*ProfileMethodInfo*/
1634         MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1635   }
1636 }
1637 
1638 bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
1639   Thread* self = Thread::Current();
1640   ScopedDebugDisallowReadBarriers sddrb(self);
1641   ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
1642   return osr_code_map_.find(method) != osr_code_map_.end();
1643 }
1644 
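     // Decide whether a compilation of `method` with `compilation_kind` should proceed.
     // Returns false if suitable code already exists (e.g. a baseline request when the method
     // is already JIT-compiled, or an OSR request with an existing OSR entry), or if the
     // declaring class is not yet visibly initialized. For native methods this also registers
     // the method in `jni_stubs_map_` and reports whether a new stub compilation is needed.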
1645 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
1646                                        Thread* self,
1647                                        CompilationKind compilation_kind,
1648                                        bool prejit) {
1649   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
1650   if (compilation_kind == CompilationKind::kBaseline && ContainsPc(existing_entry_point)) {
1651     // The existing entry point is either already baseline, or optimized. No
1652     // need to compile.
1653     VLOG(jit) << "Not compiling "
1654               << method->PrettyMethod()
1655               << " baseline, because it has already been compiled";
1656     return false;
1657   }
1658 
1659   if (method->NeedsClinitCheckBeforeCall() && !prejit) {
1660     // We do not need a synchronization barrier for checking the visibly initialized status
1661     // or checking the initialized status just for requesting visible initialization.
1662     ClassStatus status = method->GetDeclaringClass()
1663         ->GetStatus<kDefaultVerifyFlags, /*kWithSynchronizationBarrier=*/ false>();
1664     if (status != ClassStatus::kVisiblyInitialized) {
1665       // Unless we're pre-jitting, we currently don't save the JIT compiled code if we cannot
1666       // update the entrypoint due to needing an initialization check.
1667       if (status == ClassStatus::kInitialized) {
1668         // Request visible initialization but do not block to allow compiling other methods.
1669         // Hopefully, this will complete by the time the method becomes hot again.
1670         Runtime::Current()->GetClassLinker()->MakeInitializedClassesVisiblyInitialized(
1671             self, /*wait=*/ false);
1672       }
1673       VLOG(jit) << "Not compiling "
1674                 << method->PrettyMethod()
1675                 << " because it has the resolution stub";
1676       return false;
1677     }
1678   }
1679 
1680   ScopedDebugDisallowReadBarriers sddrb(self);
1681   if (compilation_kind == CompilationKind::kOsr) {
1682     ReaderMutexLock mu(self, *Locks::jit_mutator_lock_);
1683     if (osr_code_map_.find(method) != osr_code_map_.end()) {
1684       return false;
1685     }
1686   }
1687 
1688   if (UNLIKELY(method->IsNative())) {
1689     JniStubKey key(method);
1690     MutexLock mu2(self, *Locks::jit_lock_);
1691     WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
1692     auto it = jni_stubs_map_.find(key);
1693     bool new_compilation = false;
1694     if (it == jni_stubs_map_.end()) {
1695       // Create a new entry to mark the stub as being compiled.
1696       it = jni_stubs_map_.Put(key, JniStubData{});
1697       new_compilation = true;
1698     }
1699     JniStubData* data = &it->second;
1700     data->AddMethod(method);
1701     if (data->IsCompiled()) {
1702       OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
1703       const void* entrypoint = method_header->GetEntryPoint();
1704       // Update also entrypoints of other methods held by the JniStubData.
1705       // We could simply update the entrypoint of `method`, but if the last JIT GC
1706       // changed these entrypoints to GenericJNI in preparation for a full GC, we may
1707       // as well change them back: this stub will not be collected anyway, and doing so
1708       // avoids a few expensive GenericJNI calls.
1709       for (ArtMethod* m : it->second.GetMethods()) {
1710         zombie_jni_code_.erase(m);
1711         processed_zombie_jni_code_.erase(m);
1712       }
1713       data->UpdateEntryPoints(entrypoint);
1714     }
1715     return new_compilation;
1716   } else {
1717     if (compilation_kind == CompilationKind::kBaseline) {
1718       DCHECK(CanAllocateProfilingInfo());
1719     }
1720   }
1721   return true;
1722 }
1723 
1724 ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
1725   ScopedDebugDisallowReadBarriers sddrb(self);
1726   MutexLock mu(self, *Locks::jit_lock_);
1727   auto it = profiling_infos_.find(method);
1728   if (it == profiling_infos_.end()) {
1729     return nullptr;
1730   }
1731   if (!it->second->IncrementInlineUse()) {
1732     // Overflow of inlining uses, just bail.
1733     return nullptr;
1734   }
1735   return it->second;
1736 }
1737 
1738 void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
1739   ScopedDebugDisallowReadBarriers sddrb(self);
1740   MutexLock mu(self, *Locks::jit_lock_);
1741   auto it = profiling_infos_.find(method);
1742   DCHECK(it != profiling_infos_.end());
1743   it->second->DecrementInlineUse();
1744 }
1745 
1746 void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
1747   DCHECK_EQ(Thread::Current(), self);
1748   ScopedDebugDisallowReadBarriers sddrb(self);
1749   if (UNLIKELY(method->IsNative())) {
1750     WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
1751     auto it = jni_stubs_map_.find(JniStubKey(method));
1752     DCHECK(it != jni_stubs_map_.end());
1753     JniStubData* data = &it->second;
1754     DCHECK(ContainsElement(data->GetMethods(), method));
1755     if (UNLIKELY(!data->IsCompiled())) {
1756       // Failed to compile; the JNI compiler never fails, but the cache may be full.
1757       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
1758     }  // else Commit() updated entrypoints of all methods in the JniStubData.
1759   }
1760 }
1761 
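     // Reset the entrypoints of every method currently pointing into the code cache:
     // native methods go back to the GenericJNI trampoline, obsolete methods get the
     // obsolete-method entrypoints, and all others are reset to their initial code. The
     // OSR and saved-method maps are cleared, and pre-compiled zygote methods lose their
     // pre-compiled status.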
1762 void JitCodeCache::InvalidateAllCompiledCode() {
1763   Thread* self = Thread::Current();
1764   ScopedDebugDisallowReadBarriers sddrb(self);
1765   VLOG(jit) << "Invalidating all compiled code";
1766   Runtime* runtime = Runtime::Current();
1767   ClassLinker* linker = runtime->GetClassLinker();
1768   instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
1769 
1770   {
1771     WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
1772     // Change entry points of native methods back to the GenericJNI entrypoint.
1773     for (const auto& entry : jni_stubs_map_) {
1774       const JniStubData& data = entry.second;
1775       if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
1776         continue;
1777       }
1778       const OatQuickMethodHeader* method_header =
1779           OatQuickMethodHeader::FromCodePointer(data.GetCode());
1780       for (ArtMethod* method : data.GetMethods()) {
1781         if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
1782           instr->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
1783         }
1784       }
1785     }
1786 
1787     for (const auto& entry : method_code_map_) {
1788       ArtMethod* meth = entry.second;
1789       if (UNLIKELY(meth->IsObsolete())) {
1790         linker->SetEntryPointsForObsoleteMethod(meth);
1791       } else {
1792         instr->InitializeMethodsCode(meth, /*aot_code=*/ nullptr);
1793       }
1794     }
1795     osr_code_map_.clear();
1796     saved_compiled_methods_map_.clear();
1797   }
1798 
1799   for (const auto& entry : zygote_map_) {
1800     if (entry.method == nullptr) {
1801       continue;
1802     }
1803     if (entry.method->IsPreCompiled()) {
1804       entry.method->ClearPreCompiled();
1805     }
1806     instr->InitializeMethodsCode(entry.method, /*aot_code=*/nullptr);
1807   }
1808 }
1809 
1810 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
1811                                              const OatQuickMethodHeader* header) {
1812   DCHECK(!method->IsNative());
1813   const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
1814 
1815   // Clear the method counter if we are running jitted code since we might want to jit this again in
1816   // the future.
1817   if (method_entrypoint == header->GetEntryPoint()) {
1818     // The entrypoint is the one to invalidate, so we just update it to the interpreter entry point.
1819     Runtime::Current()->GetInstrumentation()->InitializeMethodsCode(method, /*aot_code=*/ nullptr);
1820   } else {
1821     Thread* self = Thread::Current();
1822     ScopedDebugDisallowReadBarriers sddrb(self);
1823     WriterMutexLock mu(self, *Locks::jit_mutator_lock_);
1824     auto it = osr_code_map_.find(method);
1825     if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
1826       // Remove the OSR method, to avoid using it again.
1827       osr_code_map_.erase(it);
1828     }
1829   }
1830 
1831   // In case the method was pre-compiled, clear that information so we
1832   // can recompile it ourselves.
1833   if (method->IsPreCompiled()) {
1834     method->ClearPreCompiled();
1835   }
1836 }
1837 
1838 void JitCodeCache::Dump(std::ostream& os) {
1839   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1840   os << "Current JIT code cache size (used / resident): "
1841      << GetCurrentRegion()->GetUsedMemoryForCode() / KB << "KB / "
1842      << GetCurrentRegion()->GetResidentMemoryForCode() / KB << "KB\n"
1843      << "Current JIT data cache size (used / resident): "
1844      << GetCurrentRegion()->GetUsedMemoryForData() / KB << "KB / "
1845      << GetCurrentRegion()->GetResidentMemoryForData() / KB << "KB\n";
1846   if (!Runtime::Current()->IsZygote()) {
1847     os << "Zygote JIT code cache size (at point of fork): "
1848        << shared_region_.GetUsedMemoryForCode() / KB << "KB / "
1849        << shared_region_.GetResidentMemoryForCode() / KB << "KB\n"
1850        << "Zygote JIT data cache size (at point of fork): "
1851        << shared_region_.GetUsedMemoryForData() / KB << "KB / "
1852        << shared_region_.GetResidentMemoryForData() / KB << "KB\n";
1853   }
1854   ReaderMutexLock mu2(Thread::Current(), *Locks::jit_mutator_lock_);
1855   os << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
1856      << "Current JIT capacity: " << PrettySize(GetCurrentRegion()->GetCurrentCapacity()) << "\n"
1857      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
1858      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
1859      << "Total number of JIT baseline compilations: " << number_of_baseline_compilations_ << "\n"
1860      << "Total number of JIT optimized compilations: " << number_of_optimized_compilations_ << "\n"
1861      << "Total number of JIT compilations for on stack replacement: "
1862         << number_of_osr_compilations_ << "\n"
1863      << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
1864   histogram_stack_map_memory_use_.PrintMemoryUse(os);
1865   histogram_code_memory_use_.PrintMemoryUse(os);
1866   histogram_profiling_info_memory_use_.PrintMemoryUse(os);
1867 }
1868 
1869 void JitCodeCache::DumpAllCompiledMethods(std::ostream& os) {
1870   ReaderMutexLock mu(Thread::Current(), *Locks::jit_mutator_lock_);
1871   for (const auto& [code_ptr, meth] : method_code_map_) {  // Includes OSR methods.
1872     OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1873     os << meth->PrettyMethod() << "@"  << std::hex
1874        << code_ptr << "-" << reinterpret_cast<uintptr_t>(code_ptr) + header->GetCodeSize() << '\n';
1875   }
1876   os << "JNIStubs: \n";
1877   for (const auto& [_, data] : jni_stubs_map_) {
1878     const void* code_ptr = data.GetCode();
1879     if (code_ptr == nullptr) {
1880       continue;
1881     }
1882     OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1883     os << std::hex << code_ptr << "-"
1884        << reinterpret_cast<uintptr_t>(code_ptr) + header->GetCodeSize() << " ";
1885     for (ArtMethod* m : data.GetMethods()) {
1886       os << m->PrettyMethod() << ";";
1887     }
1888     os << "\n";
1889   }
1890 }
1891 
1892 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
1893   Thread* self = Thread::Current();
1894 
1895   // Remove potential tasks that have been inherited from the zygote.
1896   // We do this now and not in Jit::PostForkChildAction, as system server calls
1897   // JitCodeCache::PostForkChildAction first, and then does some code loading
1898   // that may result in new JIT tasks that we want to keep.
1899   Runtime* runtime = Runtime::Current();
1900   JitThreadPool* pool = runtime->GetJit()->GetThreadPool();
1901   if (pool != nullptr) {
1902     pool->RemoveAllTasks(self);
1903   }
1904 
1905   MutexLock mu(self, *Locks::jit_lock_);
1906 
1907   // Reset potential writable MemMaps inherited from the zygote. We never want
1908   // to write to them.
1909   shared_region_.ResetWritableMappings();
1910 
1911   if (is_zygote || runtime->IsSafeMode()) {
1912     // Don't create a private region for a child zygote. Regions are usually mapped shared
1913     // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
1914     return;
1915   }
1916 
1917   // Reset all statistics to be specific to this process.
1918   number_of_baseline_compilations_ = 0;
1919   number_of_optimized_compilations_ = 0;
1920   number_of_osr_compilations_ = 0;
1921   number_of_collections_ = 0;
1922   histogram_stack_map_memory_use_.Reset();
1923   histogram_code_memory_use_.Reset();
1924   histogram_profiling_info_memory_use_.Reset();
1925 
1926   size_t initial_capacity = runtime->GetJITOptions()->GetCodeCacheInitialCapacity();
1927   size_t max_capacity = runtime->GetJITOptions()->GetCodeCacheMaxCapacity();
1928   std::string error_msg;
1929   if (!private_region_.Initialize(initial_capacity,
1930                                   max_capacity,
1931                                   /* rwx_memory_allowed= */ !is_system_server,
1932                                   is_zygote,
1933                                   &error_msg)) {
1934     LOG(FATAL) << "Could not create private region after zygote fork: " << error_msg;
1935   }
1936   if (private_region_.HasCodeMapping()) {
1937     const MemMap* exec_pages = private_region_.GetExecPages();
1938     runtime->AddGeneratedCodeRange(exec_pages->Begin(), exec_pages->Size());
1939   }
1940 }
1941 
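     // The zygote allocates from the shared region (inherited by child processes); all
     // other processes allocate from their private region.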
1942 JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
1943   return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
1944 }
1945 
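     // Invoke `cb` on every (code pointer, ArtMethod*) pair known to the cache: compiled
     // JNI stubs, regular and OSR code, saved compiled methods, and zygote-compiled code.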
1946 void JitCodeCache::VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb) {
1947   for (const auto& it : jni_stubs_map_) {
1948     const JniStubData& data = it.second;
1949     if (data.IsCompiled()) {
1950       for (ArtMethod* method : data.GetMethods()) {
1951         cb(data.GetCode(), method);
1952       }
1953     }
1954   }
1955   for (const auto& it : method_code_map_) {  // Includes OSR methods.
1956     cb(it.first, it.second);
1957   }
1958   for (const auto& it : saved_compiled_methods_map_) {
1959     cb(it.second, it.first);
1960   }
1961   for (const auto& it : zygote_map_) {
1962     if (it.code_ptr != nullptr && it.method != nullptr) {
1963       cb(it.code_ptr, it.method);
1964     }
1965   }
1966 }
1967 
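     // Set up the zygote's method-to-code map in the shared region's data pages: an
     // open-addressed hash table (linear probing) written only by the zygote and read,
     // lock-free, by child processes.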
1968 void ZygoteMap::Initialize(uint32_t number_of_methods) {
1969   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1970   // Allocate so the map stays 40-80% full. This keeps lookup times reasonable and
1971   // guarantees the probing loops terminate, since there is always an empty slot.
1972   size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
1973   const uint8_t* memory = region_->AllocateData(
1974       capacity * sizeof(Entry) + sizeof(ZygoteCompilationState));
1975   if (memory == nullptr) {
1976     LOG(WARNING) << "Could not allocate data for the zygote map";
1977     return;
1978   }
1979   const Entry* data = reinterpret_cast<const Entry*>(memory);
1980   region_->FillData(data, capacity, Entry { nullptr, nullptr });
1981   map_ = ArrayRef(data, capacity);
1982   compilation_state_ = reinterpret_cast<const ZygoteCompilationState*>(
1983       memory + capacity * sizeof(Entry));
1984   region_->WriteData(compilation_state_, ZygoteCompilationState::kInProgress);
1985 }
1986 
1987 const void* ZygoteMap::GetCodeFor(ArtMethod* method, uintptr_t pc) const {
1988   if (map_.empty()) {
1989     return nullptr;
1990   }
1991 
1992   if (method == nullptr) {
1993     // Do a linear search. This should only be used in debug builds.
1994     CHECK(kIsDebugBuild);
1995     for (const Entry& entry : map_) {
1996       const void* code_ptr = entry.code_ptr;
1997       if (code_ptr != nullptr) {
1998         OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1999         if (method_header->Contains(pc)) {
2000           return code_ptr;
2001         }
2002       }
2003     }
2004     return nullptr;
2005   }
2006 
2007   std::hash<ArtMethod*> hf;
2008   size_t index = hf(method) & (map_.size() - 1u);
2009   size_t original_index = index;
2010   // Loop over the array: we know this loop terminates as we will either
2011   // encounter the given method, or a null entry. Both terminate the loop.
2012   // Note that the zygote may concurrently write new entries to the map. That's OK as the
2013   // map is never resized.
2014   while (true) {
2015     const Entry& entry = map_[index];
2016     if (entry.method == nullptr) {
2017       // Not compiled yet.
2018       return nullptr;
2019     }
2020     if (entry.method == method) {
2021       if (entry.code_ptr == nullptr) {
2022         // This is a race with the zygote which wrote the method, but hasn't written the
2023         // code. Just bail and wait for the next time we need the method.
2024         return nullptr;
2025       }
2026       if (pc != 0 && !OatQuickMethodHeader::FromCodePointer(entry.code_ptr)->Contains(pc)) {
2027         return nullptr;
2028       }
2029       return entry.code_ptr;
2030     }
2031     index = (index + 1) & (map_.size() - 1);
2032     DCHECK_NE(original_index, index);
2033   }
2034 }
2035 
2036 void ZygoteMap::Put(const void* code, ArtMethod* method) {
2037   if (map_.empty()) {
2038     return;
2039   }
2040   CHECK(Runtime::Current()->IsZygote());
2041   std::hash<ArtMethod*> hf;
2042   size_t index = hf(method) & (map_.size() - 1);
2043   size_t original_index = index;
2044   // Because the size of the map is bigger than the number of methods that will
2045   // be added, we are guaranteed to find a free slot in the array, and
2046   // therefore for this loop to terminate.
2047   while (true) {
2048     const Entry* entry = &map_[index];
2049     if (entry->method == nullptr) {
2050       // Note that readers can read this memory concurrently, but that's OK: we only
2051       // write pointer-sized values, and readers tolerate a partially published entry.
2052       region_->WriteData(entry, Entry { method, code });
2053       break;
2054     }
2055     index = (index + 1) & (map_.size() - 1);
2056     DCHECK_NE(original_index, index);
2057   }
2058   DCHECK_EQ(GetCodeFor(method), code);
2059 }
2060 
2061 }  // namespace jit
2062 }  // namespace art
2063