1 /*
2 * Copyright 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "jit.h"
18
19 #include <dlfcn.h>
20 #include <sys/resource.h>
21
22 #include "app_info.h"
23 #include "art_method-inl.h"
24 #include "base/file_utils.h"
25 #include "base/logging.h" // For VLOG.
26 #include "base/memfd.h"
27 #include "base/memory_tool.h"
28 #include "base/pointer_size.h"
29 #include "base/runtime_debug.h"
30 #include "base/scoped_flock.h"
31 #include "base/utils.h"
32 #include "class_root-inl.h"
33 #include "compilation_kind.h"
34 #include "debugger.h"
35 #include "dex/type_lookup_table.h"
36 #include "entrypoints/entrypoint_utils-inl.h"
37 #include "entrypoints/runtime_asm_entrypoints.h"
38 #include "gc/space/image_space.h"
39 #include "gc/task_processor.h"
40 #include "interpreter/interpreter.h"
41 #include "jit-inl.h"
42 #include "jit_code_cache.h"
43 #include "jit_create.h"
44 #include "jni/java_vm_ext.h"
45 #include "mirror/method_handle_impl.h"
46 #include "mirror/var_handle.h"
47 #include "oat/image-inl.h"
48 #include "oat/oat_file.h"
49 #include "oat/oat_file_manager.h"
50 #include "oat/oat_quick_method_header.h"
51 #include "oat/stack_map.h"
52 #include "profile/profile_boot_info.h"
53 #include "profile/profile_compilation_info.h"
54 #include "profile_saver.h"
55 #include "runtime.h"
56 #include "runtime_options.h"
57 #include "small_pattern_matcher.h"
58 #include "stack.h"
59 #include "thread-inl.h"
60 #include "thread_list.h"
61
62 using android::base::unique_fd;
63
64 namespace art HIDDEN {
65 namespace jit {
66
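// Global switch for on-stack replacement (OSR): when false, PrepareForOsr() below bails out early
// and loops keep executing in their current frames.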
67 static constexpr bool kEnableOnStackReplacement = true;
68
69 // JIT compiler
70 JitCompilerInterface* Jit::jit_compiler_ = nullptr;
71
72 void Jit::DumpInfo(std::ostream& os) {
73 code_cache_->Dump(os);
74 cumulative_timings_.Dump(os);
75 MutexLock mu(Thread::Current(), lock_);
76 memory_use_.PrintMemoryUse(os);
77 }
78
79 void Jit::DumpForSigQuit(std::ostream& os) {
80 DumpInfo(os);
81 ProfileSaver::DumpInstanceInfo(os);
82 }
83
84 void Jit::AddTimingLogger(const TimingLogger& logger) {
85 cumulative_timings_.AddLogger(logger);
86 }
87
88 Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
89 : code_cache_(code_cache),
90 options_(options),
91 boot_completed_lock_("Jit::boot_completed_lock_"),
92 cumulative_timings_("JIT timings"),
93 memory_use_("Memory used for compilation", 16),
94 lock_("JIT memory use lock"),
95 zygote_mapping_methods_(),
96 fd_methods_(-1),
97 fd_methods_size_(0) {}
98
99 std::unique_ptr<Jit> Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
100 jit_compiler_ = jit_create();
101 std::unique_ptr<Jit> jit(new Jit(code_cache, options));
102
103 // If the code collector is enabled, check if that still holds:
104 // With 'perf', we want a 1-1 mapping between an address and a method.
105 // We aren't able to keep method pointers live during the instrumentation method entry trampoline
106 // so we will just disable jit-gc if we are doing that.
107 // JitAtFirstUse compiles the methods synchronously on mutator threads. While this should work
108 // in theory it is causing deadlocks in some jvmti tests related to Jit GC. Hence, disabling
109 // Jit GC for now (b/147208992).
110 if (code_cache->GetGarbageCollectCode()) {
111 code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
112 !jit->JitAtFirstUse());
113 }
114
115 VLOG(jit) << "JIT created with initial_capacity="
116 << PrettySize(options->GetCodeCacheInitialCapacity())
117 << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
118 << ", warmup_threshold=" << options->GetWarmupThreshold()
119 << ", optimize_threshold=" << options->GetOptimizeThreshold()
120 << ", profile_saver_options=" << options->GetProfileSaverOptions();
121
122 // We want to know whether the compiler is compiling baseline, as this
123 // affects how we GC ProfilingInfos.
124 for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
125 if (option == "--baseline") {
126 options->SetUseBaselineCompiler();
127 break;
128 }
129 }
130
131 // Notify native debugger about the classes already loaded before the creation of the jit.
132 jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
133
134 return jit;
135 }
136
137
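// Fast path that installs a prebuilt code stub for trivially simple methods via
// SmallPatternMatcher instead of running a full JIT compilation.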
138 bool Jit::TryPatternMatch(ArtMethod* method_to_compile, CompilationKind compilation_kind) {
139 // Try to pattern match the method. Only on arm and arm64 for now as we have
140 // sufficiently similar calling convention between C++ and managed code.
141 if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) {
142 if (!Runtime::Current()->IsJavaDebuggable() &&
143 compilation_kind == CompilationKind::kBaseline &&
144 !method_to_compile->StillNeedsClinitCheck()) {
145 const void* pattern = SmallPatternMatcher::TryMatch(method_to_compile);
146 if (pattern != nullptr) {
147 VLOG(jit) << "Successfully pattern matched " << method_to_compile->PrettyMethod();
148 Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method_to_compile, pattern);
149 return true;
150 }
151 }
152 }
153 return false;
154 }
155
156 bool Jit::CompileMethodInternal(ArtMethod* method,
157 Thread* self,
158 CompilationKind compilation_kind,
159 bool prejit) {
160 DCHECK(Runtime::Current()->UseJitCompilation());
161 DCHECK(!method->IsRuntimeMethod());
162
163 // If the baseline flag was explicitly passed in the compiler options, change the compilation kind
164 // from optimized to baseline.
165 if (jit_compiler_->IsBaselineCompiler() && compilation_kind == CompilationKind::kOptimized) {
166 compilation_kind = CompilationKind::kBaseline;
167 }
168
169 if (method->IsPreCompiled() && !prejit) {
170 VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
171 << " due to the method being marked pre-compiled,"
172 << " and the compilation request isn't for pre-compilation.";
173 return false;
174 }
175
176 // If we're asked to compile baseline, but we cannot allocate profiling infos,
177 // change the compilation kind to optimized.
178 if ((compilation_kind == CompilationKind::kBaseline) &&
179 !GetCodeCache()->CanAllocateProfilingInfo()) {
180 compilation_kind = CompilationKind::kOptimized;
181 }
182
183 // Don't compile the method if it has breakpoints.
184 if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
185 VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
186 << " due to not being safe to jit according to runtime-callbacks. For example, there"
187 << " could be breakpoints in this method.";
188 return false;
189 }
190
191 if (!method->IsCompilable()) {
192 DCHECK(method->GetDeclaringClass()->IsObsoleteObject() ||
193 method->IsProxyMethod()) << method->PrettyMethod();
194 VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to method being made "
195 << "obsolete while waiting for JIT task to run. This probably happened due to "
196 << "concurrent structural class redefinition.";
197 return false;
198 }
199
200 // Don't compile the method if we are supposed to be deoptimized.
201 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
202 if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
203 VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
204 return false;
205 }
206
207 JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
208 if ((compilation_kind == CompilationKind::kOsr) && GetCodeCache()->IsSharedRegion(*region)) {
209 VLOG(jit) << "JIT not osr compiling "
210 << method->PrettyMethod()
211 << " due to using shared region";
212 return false;
213 }
214
215 // If we get a request to compile a proxy method, we pass the actual Java method
216 // of that proxy method, as the compiler does not expect a proxy method.
217 ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
218
219 if (TryPatternMatch(method_to_compile, compilation_kind)) {
220 return true;
221 }
222
223 if (!code_cache_->NotifyCompilationOf(method_to_compile, self, compilation_kind, prejit)) {
224 return false;
225 }
226
227 VLOG(jit) << "Compiling method "
228 << ArtMethod::PrettyMethod(method_to_compile)
229 << " kind=" << compilation_kind;
230 bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
231 code_cache_->DoneCompiling(method_to_compile, self);
232 if (!success) {
233 VLOG(jit) << "Failed to compile method "
234 << ArtMethod::PrettyMethod(method_to_compile)
235 << " kind=" << compilation_kind;
236 }
237 if (kIsDebugBuild) {
238 if (self->IsExceptionPending()) {
239 mirror::Throwable* exception = self->GetException();
240 LOG(FATAL) << "No pending exception expected after compiling "
241 << ArtMethod::PrettyMethod(method)
242 << ": "
243 << exception->Dump();
244 }
245 }
246 return success;
247 }
248
249 void Jit::WaitForWorkersToBeCreated() {
250 if (thread_pool_ != nullptr) {
251 thread_pool_->WaitForWorkersToBeCreated();
252 }
253 }
254
255 void Jit::DeleteThreadPool() {
256 Thread* self = Thread::Current();
257 if (thread_pool_ != nullptr) {
258 std::unique_ptr<JitThreadPool> pool;
259 {
260 ScopedSuspendAll ssa(__FUNCTION__);
261 // Clear thread_pool_ field while the threads are suspended.
262 // A mutator in the 'AddSamples' method will check against it.
263 pool = std::move(thread_pool_);
264 }
265
266 // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
267 if (!kRunningOnMemoryTool) {
268 pool->StopWorkers(self);
269 pool->RemoveAllTasks(self);
270 }
271 // We could just suspend all threads, but we know those threads
272 // will finish in a short period, so it's not worth adding suspend logic
273 // here. Besides, this is only done for shutdown.
274 pool->Wait(self, false, false);
275 }
276 }
277
278 void Jit::StartProfileSaver(const std::string& profile_filename,
279 const std::vector<std::string>& code_paths,
280 const std::string& ref_profile_filename,
281 AppInfo::CodeType code_type) {
282 if (options_->GetSaveProfilingInfo()) {
283 ProfileSaver::Start(options_->GetProfileSaverOptions(),
284 profile_filename,
285 code_cache_,
286 code_paths,
287 ref_profile_filename,
288 code_type);
289 }
290 }
291
292 void Jit::StopProfileSaver() {
293 if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
294 ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
295 }
296 }
297
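// "JIT at first use" is the configuration where the hot-method threshold is zero, i.e. methods are
// compiled synchronously the first time they are entered (see MethodEntered).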
298 bool Jit::JitAtFirstUse() {
299 return HotMethodThreshold() == 0;
300 }
301
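// A method's compiled code can be invoked directly only if its current entrypoint points into the
// JIT code cache.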
302 bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
303 return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
304 }
305
306 Jit::~Jit() {
307 DCHECK_IMPLIES(options_->GetSaveProfilingInfo(), !ProfileSaver::IsStarted());
308 if (options_->DumpJitInfoOnShutdown()) {
309 DumpInfo(LOG_STREAM(INFO));
310 Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
311 }
312 DeleteThreadPool();
313 if (jit_compiler_ != nullptr) {
314 delete jit_compiler_;
315 jit_compiler_ = nullptr;
316 }
317 }
318
319 void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
320 if (!Runtime::Current()->UseJitCompilation()) {
321 // No need to notify if we only use the JIT to save profiles.
322 return;
323 }
324 jit::Jit* jit = Runtime::Current()->GetJit();
325 if (jit->jit_compiler_->GenerateDebugInfo()) {
326 jit_compiler_->TypesLoaded(&type, 1);
327 }
328 }
329
330 void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
331 struct CollectClasses : public ClassVisitor {
332 bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
333 classes_.push_back(klass.Ptr());
334 return true;
335 }
336 std::vector<mirror::Class*> classes_;
337 };
338
339 if (jit_compiler_->GenerateDebugInfo()) {
340 ScopedObjectAccess so(Thread::Current());
341
342 CollectClasses visitor;
343 linker->VisitClasses(&visitor);
344 jit_compiler_->TypesLoaded(visitor.classes_.data(), visitor.classes_.size());
345 }
346 }
347
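// Architecture-specific assembly stub that copies the prepared OSR frame in `stack` onto the
// native stack, jumps to `native_pc`, and writes the method's return value into `result`
// according to `shorty`.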
348 extern "C" void art_quick_osr_stub(void** stack,
349 size_t stack_size_in_bytes,
350 const uint8_t* native_pc,
351 JValue* result,
352 const char* shorty,
353 Thread* self);
354
355 OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs) {
356 if (!kEnableOnStackReplacement) {
357 return nullptr;
358 }
359
360 // Cheap check if the method has been compiled already. That's an indicator that we should
361 // osr into it.
362 if (!GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
363 return nullptr;
364 }
365
366 // Fetch some data before looking up an OSR method. We don't want thread
367 // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
368 // method while we are being suspended.
369 CodeItemDataAccessor accessor(method->DexInstructionData());
370 const size_t number_of_vregs = accessor.RegistersSize();
371 std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
372 OsrData* osr_data = nullptr;
373
374 {
375 ScopedAssertNoThreadSuspension sts("Holding OSR method");
376 const OatQuickMethodHeader* osr_method = GetCodeCache()->LookupOsrMethodHeader(method);
377 if (osr_method == nullptr) {
378 // No osr method yet, just return to the interpreter.
379 return nullptr;
380 }
381
382 CodeInfo code_info(osr_method);
383
384 // Find stack map starting at the target dex_pc.
385 StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc);
386 if (!stack_map.IsValid()) {
387 // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
388 // hope that the next branch has one.
389 return nullptr;
390 }
391
392 // We found a stack map, now fill the frame with dex register values from the interpreter's
393 // shadow frame.
394 DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
395 DCHECK_EQ(vreg_map.size(), number_of_vregs);
396
397 size_t frame_size = osr_method->GetFrameSizeInBytes();
398
399 // Allocate memory to put shadow frame values. The osr stub will copy that memory to
400 // the stack.
401 // Note that we could pass the shadow frame to the stub, and let it copy the values there,
402 // but that is engineering complexity not worth the effort for something like OSR.
403 osr_data = reinterpret_cast<OsrData*>(malloc(sizeof(OsrData) + frame_size));
404 if (osr_data == nullptr) {
405 return nullptr;
406 }
407 memset(osr_data, 0, sizeof(OsrData) + frame_size);
408 osr_data->frame_size = frame_size;
409
410 // Art ABI: ArtMethod is at the bottom of the stack.
411 osr_data->memory[0] = method;
412
413 if (vreg_map.empty()) {
414 // If we don't have a dex register map, then there are no live dex registers at
415 // this dex pc.
416 } else {
417 for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
418 DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
419 if (location == DexRegisterLocation::Kind::kNone) {
420 // Dex register is dead or uninitialized.
421 continue;
422 }
423
424 if (location == DexRegisterLocation::Kind::kConstant) {
425 // We skip constants because the compiled code knows how to handle them.
426 continue;
427 }
428
429 DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);
430
431 int32_t vreg_value = vregs[vreg];
432 int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
433 DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
434 DCHECK_GT(slot_offset, 0);
435 (reinterpret_cast<int32_t*>(osr_data->memory))[slot_offset / sizeof(int32_t)] = vreg_value;
436 }
437 }
438
439 osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeQuickCodeISA) +
440 osr_method->GetEntryPoint();
441 VLOG(jit) << "Jumping to "
442 << method_name
443 << "@"
444 << std::hex << reinterpret_cast<uintptr_t>(osr_data->native_pc);
445 }
446 return osr_data;
447 }
448
449 bool Jit::MaybeDoOnStackReplacement(Thread* thread,
450 ArtMethod* method,
451 uint32_t dex_pc,
452 int32_t dex_pc_offset,
453 JValue* result) {
454 Jit* jit = Runtime::Current()->GetJit();
455 if (jit == nullptr) {
456 return false;
457 }
458
459 if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd<kNativeStackType>())) {
460 // Don't attempt to do an OSR if we are close to the stack limit. Since
461 // the interpreter frames are still on stack, OSR has the potential
462 // to stack overflow even for a simple loop.
463 // b/27094810.
464 return false;
465 }
466
467 // Get the actual Java method if this method is from a proxy class. The compiler
468 // and the JIT code cache do not expect methods from proxy classes.
469 method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
470
471 // Before allowing the jump, make sure no code is actively inspecting the method to avoid
472 // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
473 // disable OSR when single stepping, but that's currently hard to know at this point.
474 // Currently, HaveLocalsChanged is not frame specific. It is possible to make it frame specific
475 // to allow OSR of frames that don't have any locals changed but it isn't worth the additional
476 // complexity.
477 if (Runtime::Current()->GetInstrumentation()->NeedsSlowInterpreterForMethod(thread, method) ||
478 Runtime::Current()->GetRuntimeCallbacks()->HaveLocalsChanged()) {
479 return false;
480 }
481
482 ShadowFrame* shadow_frame = thread->GetManagedStack()->GetTopShadowFrame();
483 OsrData* osr_data = jit->PrepareForOsr(method,
484 dex_pc + dex_pc_offset,
485 shadow_frame->GetVRegArgs(0));
486
487 if (osr_data == nullptr) {
488 return false;
489 }
490
491 {
492 thread->PopShadowFrame();
493 ManagedStack fragment;
494 thread->PushManagedStackFragment(&fragment);
495 (*art_quick_osr_stub)(osr_data->memory,
496 osr_data->frame_size,
497 osr_data->native_pc,
498 result,
499 method->GetShorty(),
500 thread);
501
502 if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
503 thread->DeoptimizeWithDeoptimizationException(result);
504 }
505 thread->PopManagedStackFragment(fragment);
506 }
507 free(osr_data);
508 thread->PushShadowFrame(shadow_frame);
509 VLOG(jit) << "Done running OSR code for " << method->PrettyMethod();
510 return true;
511 }
512
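// Records how much native memory the compiler used for a single method; unusually large
// compilations (over 4MB) are also logged.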
513 void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
514 if (bytes > 4 * MB) {
515 LOG(INFO) << "Compiler allocated "
516 << PrettySize(bytes)
517 << " to compile "
518 << ArtMethod::PrettyMethod(method);
519 }
520 MutexLock mu(Thread::Current(), lock_);
521 memory_use_.AddValue(bytes);
522 }
523
524 void Jit::NotifyZygoteCompilationDone() {
525 if (fd_methods_ == -1) {
526 return;
527 }
528
529 size_t offset = 0;
530 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
531 const ImageHeader& header = space->GetImageHeader();
532 const ImageSection& section = header.GetMethodsSection();
533 // Because mremap works at page boundaries, we can only handle methods
534 // within a page range. For methods that fall above or below the range,
535 // the child processes will copy their contents to their private mapping
536 // in `child_mapping_methods`. See `MapBootImageMethods`.
537 uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
538 uint8_t* page_end =
539 AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
540 if (page_end > page_start) {
541 uint64_t capacity = page_end - page_start;
542 memcpy(zygote_mapping_methods_.Begin() + offset, page_start, capacity);
543 offset += capacity;
544 }
545 }
546
547 // Do an msync to ensure we are not affected by writes still being in caches.
548 if (msync(zygote_mapping_methods_.Begin(), fd_methods_size_, MS_SYNC) != 0) {
549 PLOG(WARNING) << "Failed to sync boot image methods memory";
550 code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
551 return;
552 }
553
554 // We don't need the shared mapping anymore, and we need to drop it in case
555 // the file hasn't been sealed writable.
556 zygote_mapping_methods_ = MemMap::Invalid();
557
558 // Seal writes now. Zygote and children will map the memory privately in order
559 // to write to it.
560 if (fcntl(fd_methods_, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_WRITE) == -1) {
561 PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
562 code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
563 return;
564 }
565
566 std::string error_str;
567 MemMap child_mapping_methods = MemMap::MapFile(
568 fd_methods_size_,
569 PROT_READ | PROT_WRITE,
570 MAP_PRIVATE,
571 fd_methods_,
572 /* start= */ 0,
573 /* low_4gb= */ false,
574 "boot-image-methods",
575 /* reuse= */ true, // The mapping will be reused by the mremaps below.
576 &error_str);
577
578 if (!child_mapping_methods.IsValid()) {
579 LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
580 code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
581 return;
582 }
583
584 // Ensure the contents are the same as before: there was a window between
585 // the memcpy and the sealing where other processes could have changed the
586 // contents.
587 // Note this would not be needed if we could have used F_SEAL_FUTURE_WRITE,
588 // see b/143833776.
589 offset = 0;
590 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
591 const ImageHeader& header = space->GetImageHeader();
592 const ImageSection& section = header.GetMethodsSection();
593 // Because mremap works at page boundaries, we can only handle methods
594 // within a page range. For methods that fall above or below the range,
595 // the child processes will copy their contents to their private mapping
596 // in `child_mapping_methods`. See `MapBootImageMethods`.
597 uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
598 uint8_t* page_end =
599 AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
600 if (page_end > page_start) {
601 uint64_t capacity = page_end - page_start;
602 if (memcmp(child_mapping_methods.Begin() + offset, page_start, capacity) != 0) {
603 LOG(WARNING) << "Contents differ in boot image methods data";
604 code_cache_->GetZygoteMap()->SetCompilationState(
605 ZygoteCompilationState::kNotifiedFailure);
606 return;
607 }
608 offset += capacity;
609 }
610 }
611
612 // Future spawned processes don't need the fd anymore.
613 fd_methods_.reset();
614
615 // In order to have the zygote and children share the memory, we also remap
616 // the memory into the zygote process.
617 offset = 0;
618 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
619 const ImageHeader& header = space->GetImageHeader();
620 const ImageSection& section = header.GetMethodsSection();
621 // Because mremap works at page boundaries, we can only handle methods
622 // within a page range. For methods that fall above or below the range,
623 // the child processes will copy their contents to their private mapping
624 // in `child_mapping_methods`. See `MapBootImageMethods`.
625 uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
626 uint8_t* page_end =
627 AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
628 if (page_end > page_start) {
629 uint64_t capacity = page_end - page_start;
630 if (mremap(child_mapping_methods.Begin() + offset,
631 capacity,
632 capacity,
633 MREMAP_FIXED | MREMAP_MAYMOVE,
634 page_start) == MAP_FAILED) {
635 // Failing to remap is safe as the process will just use the old
636 // contents.
637 PLOG(WARNING) << "Failed mremap of boot image methods of " << space->GetImageFilename();
638 }
639 offset += capacity;
640 }
641 }
642
643 LOG(INFO) << "Successfully notified child processes on sharing boot image methods";
644
645 // Mark that compilation of boot classpath is done, and memory can now be
646 // shared. Other processes will pick up this information.
647 code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedOk);
648 }
649
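// Thread-pool task that runs one JIT compilation (regular or pre-compilation) of a single method.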
650 class JitCompileTask final : public Task {
651 public:
652 enum class TaskKind {
653 kCompile,
654 kPreCompile,
655 };
656
657 JitCompileTask(ArtMethod* method,
658 TaskKind task_kind,
659 CompilationKind compilation_kind)
660 : method_(method),
661 kind_(task_kind),
662 compilation_kind_(compilation_kind) {
663 }
664
665 void Run(Thread* self) override {
666 {
667 ScopedObjectAccess soa(self);
668 switch (kind_) {
669 case TaskKind::kCompile:
670 case TaskKind::kPreCompile: {
671 Runtime::Current()->GetJit()->CompileMethodInternal(
672 method_,
673 self,
674 compilation_kind_,
675 /* prejit= */ (kind_ == TaskKind::kPreCompile));
676 break;
677 }
678 }
679 }
680 ProfileSaver::NotifyJitActivity();
681 }
682
683 void Finalize() override {
684 JitThreadPool* thread_pool = Runtime::Current()->GetJit()->GetThreadPool();
685 if (thread_pool != nullptr) {
686 thread_pool->Remove(this);
687 }
688 delete this;
689 }
690
691 ArtMethod* GetArtMethod() const {
692 return method_;
693 }
694
695 CompilationKind GetCompilationKind() const {
696 return compilation_kind_;
697 }
698
699 private:
700 ArtMethod* const method_;
701 const TaskKind kind_;
702 const CompilationKind compilation_kind_;
703
704 DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
705 };
706
707 static std::string GetProfileFile(const std::string& dex_location) {
708 // Hardcoded assumption where the profile file is.
709 // TODO(ngeoffray): this is brittle and we would need to change it if we
710 // wanted to do more eager JITting of methods in a profile. This is
711 // currently only for system server.
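// For example, "/system/framework/services.jar" maps to "/system/framework/services.jar.prof".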
712 return dex_location + ".prof";
713 }
714
715 static std::string GetBootProfileFile(const std::string& profile) {
716 // The boot profile can be found next to the compilation profile, with a
717 // different extension.
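// For example, "services.jar.prof" becomes "services.jar.bprof".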
718 return ReplaceFileExtension(profile, "bprof");
719 }
720
721 // Return whether the address is guaranteed to be backed by a file or is shared.
722 // This information can be used to know whether MADV_DONTNEED will make
723 // following accesses repopulate the memory or return zero.
724 static bool IsAddressKnownBackedByFileOrShared(const void* addr) {
725 // We use the Linux pagemap interface for knowing if an address is backed
726 // by a file or is shared. See:
727 // https://www.kernel.org/doc/Documentation/vm/pagemap.txt
728 const size_t page_size = MemMap::GetPageSize();
729 uintptr_t vmstart = reinterpret_cast<uintptr_t>(AlignDown(addr, page_size));
730 off_t index = (vmstart / page_size) * sizeof(uint64_t);
731 android::base::unique_fd pagemap(open("/proc/self/pagemap", O_RDONLY | O_CLOEXEC));
732 if (pagemap == -1) {
733 return false;
734 }
735 if (lseek(pagemap, index, SEEK_SET) != index) {
736 return false;
737 }
738 uint64_t flags;
739 if (read(pagemap, &flags, sizeof(uint64_t)) != sizeof(uint64_t)) {
740 return false;
741 }
742 // From https://www.kernel.org/doc/Documentation/vm/pagemap.txt:
743 // * Bit 61 page is file-page or shared-anon (since 3.5)
744 return (flags & (1LL << 61)) != 0;
745 }
746
747 /**
748 * A JIT task to run after all profile compilation is done.
749 */
750 class JitDoneCompilingProfileTask final : public SelfDeletingTask {
751 public:
752 explicit JitDoneCompilingProfileTask(const std::vector<const DexFile*>& dex_files)
753 : dex_files_(dex_files) {}
754
755 void Run([[maybe_unused]] Thread* self) override {
756 // Madvise DONTNEED dex files now that we're done compiling methods.
757 for (const DexFile* dex_file : dex_files_) {
758 if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
759 int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), gPageSize)),
760 RoundUp(dex_file->Size(), gPageSize),
761 MADV_DONTNEED);
762 if (result == -1) {
763 PLOG(WARNING) << "Madvise failed";
764 }
765 }
766 }
767 }
768
769 private:
770 std::vector<const DexFile*> dex_files_;
771
772 DISALLOW_COPY_AND_ASSIGN(JitDoneCompilingProfileTask);
773 };
774
775 class JitZygoteDoneCompilingTask final : public SelfDeletingTask {
776 public:
777 JitZygoteDoneCompilingTask() {}
778
779 void Run([[maybe_unused]] Thread* self) override {
780 DCHECK(Runtime::Current()->IsZygote());
781 Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteMap()->SetCompilationState(
782 ZygoteCompilationState::kDone);
783 }
784
785 private:
786 DISALLOW_COPY_AND_ASSIGN(JitZygoteDoneCompilingTask);
787 };
788
789 /**
790 * A JIT task to run Java verification of boot classpath classes that were not
791 * verified at compile-time.
792 */
793 class ZygoteVerificationTask final : public Task {
794 public:
795 ZygoteVerificationTask() {}
796
797 void Run(Thread* self) override {
798 // We are going to load classes and run verification, which may in turn load more
799 // classes. If the thread cannot load classes (typically when the runtime is
800 // debuggable), then just return.
801 if (!self->CanLoadClasses()) {
802 return;
803 }
804 Runtime* runtime = Runtime::Current();
805 ClassLinker* linker = runtime->GetClassLinker();
806 const std::vector<const DexFile*>& boot_class_path =
807 runtime->GetClassLinker()->GetBootClassPath();
808 ScopedObjectAccess soa(self);
809 StackHandleScope<2> hs(self);
810 MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
811 MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
812 uint64_t start_ns = ThreadCpuNanoTime();
813 uint64_t number_of_classes = 0;
814 for (const DexFile* dex_file : boot_class_path) {
815 dex_cache.Assign(linker->FindDexCache(self, *dex_file));
816 for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
817 const dex::ClassDef& class_def = dex_file->GetClassDef(i);
818 klass.Assign(linker->LookupResolvedType(
819 class_def.class_idx_, dex_cache.Get(), /* class_loader= */ nullptr));
820 if (klass == nullptr) {
821 // Class not loaded yet.
822 DCHECK(!self->IsExceptionPending());
823 continue;
824 }
825 if (klass->IsVerified()) {
826 continue;
827 }
828 if (linker->VerifyClass(self, /* verifier_deps= */ nullptr, klass) ==
829 verifier::FailureKind::kHardFailure) {
830 CHECK(self->IsExceptionPending());
831 LOG(WARNING) << "Methods in the boot classpath failed to verify: "
832 << self->GetException()->Dump();
833 self->ClearException();
834 } else {
835 ++number_of_classes;
836 }
837 CHECK(!self->IsExceptionPending());
838 }
839 }
840 LOG(INFO) << "Background verification of "
841 << number_of_classes
842 << " classes from boot classpath took "
843 << PrettyDuration(ThreadCpuNanoTime() - start_ns);
844 }
845 };
846
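// Task run on the zygote's JIT thread pool at boot: it compiles methods listed in the boot image
// profiles and records how many compilations were queued.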
847 class ZygoteTask final : public Task {
848 public:
849 ZygoteTask() {}
850
851 void Run(Thread* self) override {
852 Runtime* runtime = Runtime::Current();
853 uint32_t added_to_queue = 0;
854 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
855 const std::vector<const DexFile*>& boot_class_path =
856 runtime->GetClassLinker()->GetBootClassPath();
857 ScopedNullHandle<mirror::ClassLoader> null_handle;
858 // We avoid doing compilation at boot for the secondary zygote, as apps forked from it are not
859 // critical for boot.
860 if (Runtime::Current()->IsPrimaryZygote()) {
861 for (const std::string& profile_file : space->GetProfileFiles()) {
862 std::string boot_profile = GetBootProfileFile(profile_file);
863 LOG(INFO) << "JIT Zygote looking at boot profile " << boot_profile;
864
865 // We add to the queue for zygote so that we can fork processes in-between compilations.
866 added_to_queue += runtime->GetJit()->CompileMethodsFromBootProfile(
867 self, boot_class_path, boot_profile, null_handle, /* add_to_queue= */ true);
868 }
869 }
870 for (const std::string& profile_file : space->GetProfileFiles()) {
871 LOG(INFO) << "JIT Zygote looking at profile " << profile_file;
872
873 added_to_queue += runtime->GetJit()->CompileMethodsFromProfile(
874 self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
875 }
876 }
877 DCHECK(runtime->GetJit()->InZygoteUsingJit());
878 runtime->GetJit()->AddPostBootTask(self, new JitZygoteDoneCompilingTask());
879
880 JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
881 code_cache->GetZygoteMap()->Initialize(added_to_queue);
882 }
883
884 void Finalize() override {
885 delete this;
886 }
887
888 private:
889 DISALLOW_COPY_AND_ASSIGN(ZygoteTask);
890 };
891
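// Task that pre-compiles methods listed in the profile associated with the given dex files and
// class loader (currently used for system server).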
892 class JitProfileTask final : public Task {
893 public:
894 JitProfileTask(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
895 jobject class_loader) {
896 ScopedObjectAccess soa(Thread::Current());
897 StackHandleScope<1> hs(soa.Self());
898 Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
899 soa.Decode<mirror::ClassLoader>(class_loader)));
900 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
901 for (const auto& dex_file : dex_files) {
902 dex_files_.push_back(dex_file.get());
903 // Register the dex file so that we can guarantee it doesn't get deleted
904 // while reading it during the task.
905 class_linker->RegisterDexFile(*dex_file.get(), h_loader.Get());
906 }
907 // We also create our own global ref to use this class loader later.
908 class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), h_loader.Get());
909 }
910
911 void Run(Thread* self) override {
912 ScopedObjectAccess soa(self);
913 StackHandleScope<1> hs(self);
914 Handle<mirror::ClassLoader> loader = hs.NewHandle<mirror::ClassLoader>(
915 soa.Decode<mirror::ClassLoader>(class_loader_));
916
917 std::string profile = GetProfileFile(dex_files_[0]->GetLocation());
918 std::string boot_profile = GetBootProfileFile(profile);
919
920 Jit* jit = Runtime::Current()->GetJit();
921
922 jit->CompileMethodsFromBootProfile(
923 self,
924 dex_files_,
925 boot_profile,
926 loader,
927 /* add_to_queue= */ false);
928
929 jit->CompileMethodsFromProfile(
930 self,
931 dex_files_,
932 profile,
933 loader,
934 /* add_to_queue= */ true);
935 }
936
937 void Finalize() override {
938 delete this;
939 }
940
941 ~JitProfileTask() {
942 ScopedObjectAccess soa(Thread::Current());
943 soa.Vm()->DeleteGlobalRef(soa.Self(), class_loader_);
944 }
945
946 private:
947 std::vector<const DexFile*> dex_files_;
948 jobject class_loader_;
949
950 DISALLOW_COPY_AND_ASSIGN(JitProfileTask);
951 };
952
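// Copy only when the contents actually differ, so that pages which already match are not
// needlessly dirtied in the private copy-on-write mapping.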
953 static void CopyIfDifferent(void* s1, const void* s2, size_t n) {
954 if (memcmp(s1, s2, n) != 0) {
955 memcpy(s1, s2, n);
956 }
957 }
958
959 void Jit::MapBootImageMethods() {
960 if (Runtime::Current()->IsJavaDebuggable()) {
961 LOG(INFO) << "Not mapping boot image methods due to process being debuggable";
962 return;
963 }
964 CHECK_NE(fd_methods_.get(), -1);
965 if (!code_cache_->GetZygoteMap()->CanMapBootImageMethods()) {
966 LOG(WARNING) << "Not mapping boot image methods due to error from zygote";
967 // We don't need the fd anymore.
968 fd_methods_.reset();
969 return;
970 }
971
972 std::string error_str;
973 MemMap child_mapping_methods = MemMap::MapFile(
974 fd_methods_size_,
975 PROT_READ | PROT_WRITE,
976 MAP_PRIVATE,
977 fd_methods_,
978 /* start= */ 0,
979 /* low_4gb= */ false,
980 "boot-image-methods",
981 /* reuse= */ true, // The mapping will be reused by the mremaps below.
982 &error_str);
983
984 // We don't need the fd anymore.
985 fd_methods_.reset();
986
987 if (!child_mapping_methods.IsValid()) {
988 LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
989 return;
990 }
991 // We are going to mremap the child mapping into the image:
992 //
993 // ImageSection ChildMappingMethods
994 //
995 // section start --> -----------
996 // | |
997 // | |
998 // page_start --> | | <----- -----------
999 // | | | |
1000 // | | | |
1001 // | | | |
1002 // | | | |
1003 // | | | |
1004 // | | | |
1005 // | | | |
1006 // page_end --> | | <----- -----------
1007 // | |
1008 // section end --> -----------
1009 //
1010 size_t offset = 0;
1011 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
1012 const ImageHeader& header = space->GetImageHeader();
1013 const ImageSection& section = header.GetMethodsSection();
1014 uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
1015 uint8_t* page_end =
1016 AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
1017 if (page_end <= page_start) {
1018 // Section doesn't contain one aligned entire page.
1019 continue;
1020 }
1021 uint64_t capacity = page_end - page_start;
1022 // Walk over methods in the boot image, and check for:
1023 // 1) methods whose class is not initialized in the process, but are in the
1024 // zygote process. For such methods, we need their entrypoints to be stubs
1025 // that do the initialization check.
1026 // 2) native methods whose data pointer is different than the one in the
1027 // zygote. Such methods may have had custom native implementation provided
1028 // by JNI RegisterNatives.
1029 header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
1030 // Methods in the boot image should never have their single
1031 // implementation flag set (and therefore never have a `data_` pointing
1032 // to an ArtMethod for single implementation).
1033 CHECK(method.IsIntrinsic() || !method.HasSingleImplementationFlag());
1034 if (method.IsRuntimeMethod()) {
1035 return;
1036 }
1037
1038 // Pointer to the method we're currently using.
1039 uint8_t* pointer = reinterpret_cast<uint8_t*>(&method);
1040 // The data pointer of that method that we want to keep.
1041 uint8_t* data_pointer = pointer + ArtMethod::DataOffset(kRuntimePointerSize).Int32Value();
1042 if (method.IsNative() && data_pointer >= page_start && data_pointer < page_end) {
1043 // The data pointer of the ArtMethod in the shared memory we are going to remap into our
1044 // own mapping. This is the data that we will see after the remap.
1045 uint8_t* new_data_pointer =
1046 child_mapping_methods.Begin() + offset + (data_pointer - page_start);
1047 CopyIfDifferent(new_data_pointer, data_pointer, sizeof(void*));
1048 }
1049
1050 // The entrypoint of the method we're currently using and that we want to
1051 // keep.
1052 uint8_t* entry_point_pointer = pointer +
1053 ArtMethod::EntryPointFromQuickCompiledCodeOffset(kRuntimePointerSize).Int32Value();
1054 if (!method.GetDeclaringClassUnchecked()->IsVisiblyInitialized() &&
1055 method.IsStatic() &&
1056 !method.IsConstructor() &&
1057 entry_point_pointer >= page_start &&
1058 entry_point_pointer < page_end) {
1059 // The entry point of the ArtMethod in the shared memory we are going to remap into our
1060 // own mapping. This is the entrypoint that we will see after the remap.
1061 uint8_t* new_entry_point_pointer =
1062 child_mapping_methods.Begin() + offset + (entry_point_pointer - page_start);
1063 CopyIfDifferent(new_entry_point_pointer, entry_point_pointer, sizeof(void*));
1064 }
1065 }, space->Begin(), kRuntimePointerSize);
1066
1067 // Map the memory in the boot image range.
1068 if (mremap(child_mapping_methods.Begin() + offset,
1069 capacity,
1070 capacity,
1071 MREMAP_FIXED | MREMAP_MAYMOVE,
1072 page_start) == MAP_FAILED) {
1073 PLOG(WARNING) << "Failed to mremap boot image methods for " << space->GetImageFilename();
1074 }
1075 offset += capacity;
1076 }
1077
1078 LOG(INFO) << "Successfully mapped boot image methods";
1079 }
1080
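// The zygote only uses the JIT at boot when it has a boot image with a profile and JIT
// compilation is enabled.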
1081 bool Jit::InZygoteUsingJit() {
1082 Runtime* runtime = Runtime::Current();
1083 return runtime->IsZygote() && runtime->HasImageWithProfile() && runtime->UseJitCompilation();
1084 }
1085
1086 void Jit::CreateThreadPool() {
1087 // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
1088 // is not null when we instrument.
1089
1090 thread_pool_.reset(JitThreadPool::Create("Jit thread pool", 1));
1091
1092 Runtime* runtime = Runtime::Current();
1093 thread_pool_->SetPthreadPriority(
1094 runtime->IsZygote()
1095 ? options_->GetZygoteThreadPoolPthreadPriority()
1096 : options_->GetThreadPoolPthreadPriority());
1097 Start();
1098
1099 if (runtime->IsZygote()) {
1100 // To speed up class lookups, generate a type lookup table for
1101 // dex files not backed by an oat file.
1102 for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
1103 if (dex_file->GetOatDexFile() == nullptr) {
1104 TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
1105 type_lookup_tables_.push_back(
1106 std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
1107 dex_file->SetOatDexFile(type_lookup_tables_.back().get());
1108 }
1109 }
1110
1111 // Add a task that will verify boot classpath jars that were not
1112 // pre-compiled.
1113 thread_pool_->AddTask(Thread::Current(), new ZygoteVerificationTask());
1114 }
1115
1116 if (InZygoteUsingJit()) {
1117 // If we have an image with a profile, request a JIT task to
1118 // compile all methods in that profile.
1119 thread_pool_->AddTask(Thread::Current(), new ZygoteTask());
1120
1121 // And create mappings to share boot image methods memory from the zygote to
1122 // child processes.
1123
1124 // Compute the total capacity required for the boot image methods.
1125 uint64_t total_capacity = 0;
1126 for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
1127 const ImageHeader& header = space->GetImageHeader();
1128 const ImageSection& section = header.GetMethodsSection();
1129 // Mappings need to be at the page level.
1130 uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
1131 uint8_t* page_end =
1132 AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
1133 if (page_end > page_start) {
1134 total_capacity += (page_end - page_start);
1135 }
1136 }
1137
1138 // Create the child and zygote mappings to the boot image methods.
1139 if (total_capacity > 0) {
1140 // Start with '/boot' and end with '.art' to match the pattern recognized
1141 // by android_os_Debug.cpp for boot images.
1142 const char* name = "/boot-image-methods.art";
1143 unique_fd mem_fd =
1144 unique_fd(art::memfd_create(name, /* flags= */ MFD_ALLOW_SEALING | MFD_CLOEXEC));
1145 if (mem_fd.get() == -1) {
1146 PLOG(WARNING) << "Could not create boot image methods file descriptor";
1147 return;
1148 }
1149 if (ftruncate(mem_fd.get(), total_capacity) != 0) {
1150 PLOG(WARNING) << "Failed to truncate boot image methods file to " << total_capacity;
1151 return;
1152 }
1153 std::string error_str;
1154
1155 // Create the shared mapping eagerly, as this prevents other processes
1156 // from adding the writable seal.
1157 zygote_mapping_methods_ = MemMap::MapFile(
1158 total_capacity,
1159 PROT_READ | PROT_WRITE,
1160 MAP_SHARED,
1161 mem_fd,
1162 /* start= */ 0,
1163 /* low_4gb= */ false,
1164 "boot-image-methods",
1165 &error_str);
1166
1167 if (!zygote_mapping_methods_.IsValid()) {
1168 LOG(WARNING) << "Failed to create zygote mapping of boot image methods: " << error_str;
1169 return;
1170 }
1171 if (zygote_mapping_methods_.MadviseDontFork() != 0) {
1172 LOG(WARNING) << "Failed to madvise dont fork boot image methods";
1173 zygote_mapping_methods_ = MemMap();
1174 return;
1175 }
1176
1177 // We should use the F_SEAL_FUTURE_WRITE flag, but this has unexpected
1178 // behavior on private mappings after fork (the mapping becomes shared between
1179 // parent and children), see b/143833776.
1180 // We will seal the write once we are done writing to the shared mapping.
1181 if (fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) == -1) {
1182 PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
1183 zygote_mapping_methods_ = MemMap();
1184 return;
1185 }
1186 fd_methods_ = unique_fd(mem_fd.release());
1187 fd_methods_size_ = total_capacity;
1188 }
1189 }
1190 }
1191
1192 void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
1193 jobject class_loader) {
1194 if (dex_files.empty()) {
1195 return;
1196 }
1197 Runtime* runtime = Runtime::Current();
1198 // If the runtime is debuggable, don't bother precompiling methods.
1199 // If system server is being profiled, don't precompile as we are going to use
1200 // the JIT to count hotness. Note that --count-hotness-in-compiled-code is
1201 // only forced when we also profile the boot classpath, see
1202 // AndroidRuntime.cpp.
1203 if (runtime->IsSystemServer() &&
1204 UseJitCompilation() &&
1205 options_->UseProfiledJitCompilation() &&
1206 runtime->HasImageWithProfile() &&
1207 !runtime->IsSystemServerProfiled() &&
1208 !runtime->IsJavaDebuggable()) {
1209 // Note: this precompilation is currently not running in production because:
1210 // - UseProfiledJitCompilation() is not set by default.
1211 // - System server dex files are registered *before* we set the runtime as
1212 // system server (though we are in the system server process).
1213 thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
1214 }
1215 }
1216
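// Enqueues the method on the JIT thread pool, which owns the compilation queue.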
1217 void Jit::AddCompileTask(Thread* self,
1218 ArtMethod* method,
1219 CompilationKind compilation_kind) {
1220 thread_pool_->AddTask(self, method, compilation_kind);
1221 }
1222
1223 bool Jit::CompileMethodFromProfile(Thread* self,
1224 ClassLinker* class_linker,
1225 uint32_t method_idx,
1226 Handle<mirror::DexCache> dex_cache,
1227 Handle<mirror::ClassLoader> class_loader,
1228 bool add_to_queue,
1229 bool compile_after_boot) {
1230 ArtMethod* method = class_linker->ResolveMethodId(method_idx, dex_cache, class_loader);
1231 if (method == nullptr) {
1232 self->ClearException();
1233 return false;
1234 }
1235 if (!method->IsCompilable() || !method->IsInvokable()) {
1236 return false;
1237 }
1238 if (method->IsPreCompiled()) {
1239 // Already seen by another profile.
1240 return false;
1241 }
1242 CompilationKind compilation_kind = CompilationKind::kOptimized;
1243 const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
1244 if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
1245 class_linker->IsQuickGenericJniStub(entry_point) ||
1246 class_linker->IsNterpEntryPoint(entry_point) ||
1247 // We explicitly check for the resolution stub, and not the resolution trampoline.
1248 // The trampoline is for methods backed by a .oat file that has a compiled version of
1249 // the method.
1250 (entry_point == GetQuickResolutionStub())) {
1251 VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
1252 << " from profile";
1253 method->SetPreCompiled();
1254 if (!add_to_queue) {
1255 CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
1256 } else {
1257 Task* task = new JitCompileTask(
1258 method, JitCompileTask::TaskKind::kPreCompile, compilation_kind);
1259 if (compile_after_boot) {
1260 AddPostBootTask(self, task);
1261 } else {
1262 thread_pool_->AddTask(self, task);
1263 }
1264 return true;
1265 }
1266 }
1267 return false;
1268 }
1269
1270 uint32_t Jit::CompileMethodsFromBootProfile(
1271 Thread* self,
1272 const std::vector<const DexFile*>& dex_files,
1273 const std::string& profile_file,
1274 Handle<mirror::ClassLoader> class_loader,
1275 bool add_to_queue) {
1276 unix_file::FdFile profile(profile_file, O_RDONLY, true);
1277
1278 if (profile.Fd() == -1) {
1279 PLOG(WARNING) << "No boot profile: " << profile_file;
1280 return 0u;
1281 }
1282
1283 ProfileBootInfo profile_info;
1284 if (!profile_info.Load(profile.Fd(), dex_files)) {
1285 LOG(ERROR) << "Could not load profile file: " << profile_file;
1286 return 0u;
1287 }
1288
1289 ScopedObjectAccess soa(self);
1290 VariableSizedHandleScope handles(self);
1291 std::vector<Handle<mirror::DexCache>> dex_caches;
1292 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1293 for (const DexFile* dex_file : profile_info.GetDexFiles()) {
1294 dex_caches.push_back(handles.NewHandle(class_linker->FindDexCache(self, *dex_file)));
1295 }
1296
1297 uint32_t added_to_queue = 0;
1298 for (const std::pair<uint32_t, uint32_t>& pair : profile_info.GetMethods()) {
1299 if (CompileMethodFromProfile(self,
1300 class_linker,
1301 pair.second,
1302 dex_caches[pair.first],
1303 class_loader,
1304 add_to_queue,
1305 /*compile_after_boot=*/false)) {
1306 ++added_to_queue;
1307 }
1308 }
1309 return added_to_queue;
1310 }
1311
1312 uint32_t Jit::CompileMethodsFromProfile(
1313 Thread* self,
1314 const std::vector<const DexFile*>& dex_files,
1315 const std::string& profile_file,
1316 Handle<mirror::ClassLoader> class_loader,
1317 bool add_to_queue) {
1318
1319 if (profile_file.empty()) {
1320 LOG(WARNING) << "Expected a profile file in JIT zygote mode";
1321 return 0u;
1322 }
1323
1324 // We don't generate boot profiles on device, therefore we don't
1325 // need to lock the file.
1326 unix_file::FdFile profile(profile_file, O_RDONLY, true);
1327
1328 if (profile.Fd() == -1) {
1329 PLOG(WARNING) << "No profile: " << profile_file;
1330 return 0u;
1331 }
1332
1333 ProfileCompilationInfo profile_info(/* for_boot_image= */ class_loader.IsNull());
1334 if (!profile_info.Load(profile.Fd())) {
1335 LOG(ERROR) << "Could not load profile file";
1336 return 0u;
1337 }
1338 ScopedObjectAccess soa(self);
1339 StackHandleScope<1> hs(self);
1340 MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
1341 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1342 uint32_t added_to_queue = 0u;
1343 for (const DexFile* dex_file : dex_files) {
1344 std::set<dex::TypeIndex> class_types;
1345 std::set<uint16_t> all_methods;
1346 if (!profile_info.GetClassesAndMethods(*dex_file,
1347 &class_types,
1348 &all_methods,
1349 &all_methods,
1350 &all_methods)) {
1351 // This means the profile file did not reference the dex file, which is the case
1352 // if there are no classes or methods of that dex file in the profile.
1353 continue;
1354 }
1355 dex_cache.Assign(class_linker->FindDexCache(self, *dex_file));
1356 CHECK(dex_cache != nullptr) << "Could not find dex cache for " << dex_file->GetLocation();
1357
1358 for (uint16_t method_idx : all_methods) {
1359 if (CompileMethodFromProfile(self,
1360 class_linker,
1361 method_idx,
1362 dex_cache,
1363 class_loader,
1364 add_to_queue,
1365 /*compile_after_boot=*/true)) {
1366 ++added_to_queue;
1367 }
1368 }
1369 }
1370
1371 // Add a task to run when all compilation is done.
1372 AddPostBootTask(self, new JitDoneCompilingProfileTask(dex_files));
1373 return added_to_queue;
1374 }
1375
1376 bool Jit::IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
1377 if (method->IsClassInitializer() || !method->IsCompilable()) {
1378 // We do not want to compile such methods.
1379 return true;
1380 }
1381 if (method->IsNative()) {
1382 ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
1383 if (klass == GetClassRoot<mirror::MethodHandle>() ||
1384 klass == GetClassRoot<mirror::VarHandle>()) {
1385 // MethodHandle and VarHandle invocation methods are required to throw an
1386 // UnsupportedOperationException if invoked reflectively. We achieve this by having native
1387 // implementations that raise the exception. We need to disable JIT compilation of these JNI
1388 // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
1389 // stubs. Since these stubs have different stack representations we can then crash in stack
1390 // walking (b/78151261).
1391 return true;
1392 }
1393 }
1394 return false;
1395 }
1396
1397 void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
1398 // Note the hotness counter will be reset by the compiled code.
1399
1400 if (thread_pool_ == nullptr) {
1401 return;
1402 }
1403
1404 const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
1405 // Check if we already have optimized code. We might still be executing baseline code even
1406 // when we have optimized code.
1407 if (GetCodeCache()->ContainsPc(entry_point) &&
1408 !CodeInfo::IsBaseline(
1409 OatQuickMethodHeader::FromEntryPoint(entry_point)->GetOptimizedCodeInfoPtr())) {
1410 return;
1411 }
1412
1413 // We arrive here after baseline compiled code has reached its baseline
1414 // hotness threshold. If we're not only using the baseline compiler, enqueue a compilation
1415 // task that will compile an optimized version of the method.
1416 if (!options_->UseBaselineCompiler()) {
1417 AddCompileTask(self, method, CompilationKind::kOptimized);
1418 }
1419 }
1420
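// RAII helper that marks the current thread as a runtime thread for the duration of the scope and
// restores the previous state on destruction.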
1421 class ScopedSetRuntimeThread {
1422 public:
1423 explicit ScopedSetRuntimeThread(Thread* self)
1424 : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
1425 self_->SetIsRuntimeThread(true);
1426 }
1427
1428 ~ScopedSetRuntimeThread() {
1429 self_->SetIsRuntimeThread(was_runtime_thread_);
1430 }
1431
1432 private:
1433 Thread* self_;
1434 bool was_runtime_thread_;
1435 };
1436
1437 void Jit::MethodEntered(Thread* self, ArtMethod* method) {
1438 Runtime* runtime = Runtime::Current();
1439 if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
1440 ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
1441 if (np_method->IsCompilable()) {
1442 CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ false);
1443 }
1444 return;
1445 }
1446
1447 AddSamples(self, method);
1448 }
1449
1450 void Jit::WaitForCompilationToFinish(Thread* self) {
1451 if (thread_pool_ != nullptr) {
1452 thread_pool_->Wait(self, false, false);
1453 }
1454 }
1455
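// Drains the compilation queue and stops the JIT worker threads; used by ScopedJitSuspend below.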
1456 void Jit::Stop() {
1457 Thread* self = Thread::Current();
1458 // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
1459 WaitForCompilationToFinish(self);
1460 GetThreadPool()->StopWorkers(self);
1461 WaitForCompilationToFinish(self);
1462 }
1463
1464 void Jit::Start() {
1465 GetThreadPool()->StartWorkers(Thread::Current());
1466 }
1467
1468 ScopedJitSuspend::ScopedJitSuspend() {
1469 jit::Jit* jit = Runtime::Current()->GetJit();
1470 was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
1471 if (was_on_) {
1472 jit->Stop();
1473 }
1474 }
1475
1476 ScopedJitSuspend::~ScopedJitSuspend() {
1477 if (was_on_) {
1478 DCHECK(Runtime::Current()->GetJit() != nullptr);
1479 DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
1480 Runtime::Current()->GetJit()->Start();
1481 }
1482 }
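// Illustrative usage sketch (added, not part of the original source): ScopedJitSuspend stops the
// JIT worker threads for the duration of a scope and restarts them on exit, but only if the JIT
// was actually running when the scope was entered:
//
//   {
//     ScopedJitSuspend suspend_jit;
//     // No JIT compilation tasks execute here.
//   }  // Workers are restarted if they were running before.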
1483
1484 class MapBootImageMethodsTask : public gc::HeapTask {
1485 public:
1486 explicit MapBootImageMethodsTask(uint64_t target_run_time) : gc::HeapTask(target_run_time) {}
1487
1488 void Run(Thread* self ATTRIBUTE_UNUSED) override {
1489 Runtime* runtime = Runtime::Current();
1490 if (!runtime->GetJit()->GetCodeCache()->GetZygoteMap()->IsCompilationNotified()) {
1491 // Add a new task that will execute in 10 seconds.
1492 static constexpr uint64_t kWaitTimeNs = MsToNs(10000); // 10 seconds
1493 runtime->GetHeap()->AddHeapTask(new MapBootImageMethodsTask(NanoTime() + kWaitTimeNs));
1494 return;
1495 }
1496 // Prevent other threads from running while we are remapping the boot image
1497 // ArtMethod's. Native threads might still be running, but they cannot
1498 // change the contents of ArtMethod's.
1499 ScopedSuspendAll ssa(__FUNCTION__);
1500 runtime->GetJit()->MapBootImageMethods();
1501 }
1502 };
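// Note (added for clarity): the task above polls for the zygote compilation notification; if it
// has not arrived yet, the task re-enqueues itself to run again 10 seconds later, so the boot
// image methods are only remapped once the zygote has published its compiled methods.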
1503
1504 void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
1505 // Clear the potential boot tasks inherited from the zygote.
1506 {
1507 MutexLock mu(Thread::Current(), boot_completed_lock_);
1508 tasks_after_boot_.clear();
1509 }
1510
1511 Runtime* const runtime = Runtime::Current();
1512 // Check if we'll need to remap the boot image methods.
1513 if (!is_zygote && fd_methods_ != -1) {
1514 Runtime::Current()->GetHeap()->AddHeapTask(
1515 new MapBootImageMethodsTask(NanoTime() + MsToNs(10000)));
1516 }
1517
1518 if (is_zygote || runtime->IsSafeMode()) {
1519 // Delete the thread pool, we are not going to JIT.
1520 thread_pool_.reset(nullptr);
1521 return;
1522 }
1523 // At this point, the compiler options have been adjusted to the particular configuration
1524 // of the forked child. Parse them again.
1525 jit_compiler_->ParseCompilerOptions();
1526
1527 // Adjust the status of code cache collection: the status from zygote was to not collect.
1528 // JitAtFirstUse compiles the methods synchronously on mutator threads. While this should work
1529 // in theory it is causing deadlocks in some jvmti tests related to Jit GC. Hence, disabling
1530 // Jit GC for now (b/147208992).
1531 code_cache_->SetGarbageCollectCode(
1532 !jit_compiler_->GenerateDebugInfo() &&
1533 !JitAtFirstUse());
1534
1535 if (is_system_server && runtime->HasImageWithProfile()) {
1536 // Disable garbage collection: we don't want it to delete methods we're compiling
1537 // based on the boot and system server profiles.
1538 // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
1539 code_cache_->SetGarbageCollectCode(false);
1540 }
1541
1542 // We do this here instead of PostZygoteFork, as NativeDebugInfoPostFork only
1543 // applies to a child.
1544 NativeDebugInfoPostFork();
1545 }
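// Summary of the post-fork decisions above (added for clarity):
//   - child zygote or safe mode: drop the thread pool, no JIT compilation;
//   - non-zygote child with fd_methods_ set: schedule MapBootImageMethodsTask;
//   - system server with an image profile: keep JIT GC disabled so profile-guided
//     compilations are not collected while they are still being produced.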
1546
1547 void Jit::PreZygoteFork() {
1548 if (thread_pool_ == nullptr) {
1549 return;
1550 }
1551 thread_pool_->DeleteThreads();
1552
1553 NativeDebugInfoPreFork();
1554 }
1555
1556 // Returns the number of threads running in the current process (entries in /proc/self/task),
1556 // or -1 if the directory could not be read.
1557 static int GetTaskCount() {
1558 DIR* directory = opendir("/proc/self/task");
1559 if (directory == nullptr) {
1560 return -1;
1561 }
1562
1563 uint32_t count = 0;
1564 struct dirent* entry = nullptr;
1565 while ((entry = readdir(directory)) != nullptr) {
1566 if ((strcmp(entry->d_name, ".") == 0) || (strcmp(entry->d_name, "..") == 0)) {
1567 continue;
1568 }
1569 ++count;
1570 }
1571 closedir(directory);
1572 return count;
1573 }
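// Note (added for clarity): PostZygoteFork() below uses this to assert that the zygote is
// effectively single-threaded (CHECK_EQ(GetTaskCount(), 1)) before copying boot image method
// data into the shared mapping; -1 means /proc/self/task could not be read.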
1574
1575 void Jit::PostZygoteFork() {
1576 Runtime* runtime = Runtime::Current();
1577 if (thread_pool_ == nullptr) {
1578 // If this is a child zygote, check if we need to remap the boot image
1579 // methods.
1580 if (runtime->IsZygote() &&
1581 fd_methods_ != -1 &&
1582 code_cache_->GetZygoteMap()->IsCompilationNotified()) {
1583 ScopedSuspendAll ssa(__FUNCTION__);
1584 MapBootImageMethods();
1585 }
1586 return;
1587 }
1588 if (runtime->IsZygote() && code_cache_->GetZygoteMap()->IsCompilationDoneButNotNotified()) {
1589 // Copy the boot image methods data to the mappings we created to share
1590 // with the children. We do this here as we are the only thread running and
1591 // we don't risk other threads concurrently updating the ArtMethod's.
1592 CHECK_EQ(GetTaskCount(), 1);
1593 NotifyZygoteCompilationDone();
1594 CHECK(code_cache_->GetZygoteMap()->IsCompilationNotified());
1595 }
1596 thread_pool_->CreateThreads();
1597 thread_pool_->SetPthreadPriority(
1598 runtime->IsZygote()
1599 ? options_->GetZygoteThreadPoolPthreadPriority()
1600 : options_->GetThreadPoolPthreadPriority());
1601 }
1602
1603 void Jit::AddPostBootTask(Thread* self, Task* task) {
1604 MutexLock mu(self, boot_completed_lock_);
1605 if (boot_completed_) {
1606 thread_pool_->AddTask(self, task);
1607 } else {
1608 tasks_after_boot_.push_back(task);
1609 }
1610 }
1611
1612 void Jit::BootCompleted() {
1613 Thread* self = Thread::Current();
1614 std::deque<Task*> tasks;
1615 {
1616 MutexLock mu(self, boot_completed_lock_);
1617 tasks = std::move(tasks_after_boot_);
1618 boot_completed_ = true;
1619 }
1620 for (Task* task : tasks) {
1621 thread_pool_->AddTask(self, task);
1622 }
1623 }
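// Illustrative sequence (added, not part of the original source): tasks queued before boot
// completion are buffered and flushed exactly once when BootCompleted() runs:
//
//   jit->AddPostBootTask(self, task1);  // Before boot: buffered in tasks_after_boot_.
//   jit->BootCompleted();               // Buffered tasks are handed to the thread pool.
//   jit->AddPostBootTask(self, task2);  // After boot: goes straight to the thread pool.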
1624
1625 bool Jit::CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const {
1626 return !is_for_shared_region ||
1627 Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(method->GetDeclaringClass());
1628 }
1629
1630 bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
1631 return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls);
1632 }
1633
1634 bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
1635 return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string);
1636 }
1637
1638 bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
1639 if (!is_for_shared_region) {
1640 return cls->IsInitialized();
1641 } else {
1642 // Look up the class status in the oat file.
1643 const DexFile& dex_file = *cls->GetDexCache()->GetDexFile();
1644 const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
1645 // In case we run without an image there won't be a backing oat file.
1646 if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
1647 return false;
1648 }
1649 uint16_t class_def_index = cls->GetDexClassDefIndex();
1650 return oat_dex_file->GetOatClass(class_def_index).GetStatus() >= ClassStatus::kInitialized;
1651 }
1652 }
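// Note (added for clarity, rationale inferred from the code above): when compiling for the
// shared region, the runtime initialization state observed in this process is not used;
// instead the class status recorded in the oat file is consulted, presumably because the
// shared code may run in processes where the class has not been initialized yet.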
1653
1654 void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
1655 if (thread_pool_ == nullptr) {
1656 return;
1657 }
1658
1659 if (JitAtFirstUse()) {
1660 // Tests might request JIT on first use (compiled synchronously in the interpreter).
1661 return;
1662 }
1663
1664 if (!UseJitCompilation()) {
1665 return;
1666 }
1667
1668 if (IgnoreSamplesForMethod(method)) {
1669 return;
1670 }
1671
1672 if (GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
1673 if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
1674 // If we already have compiled code for it, nterp may be stuck in a loop.
1675 // Compile OSR.
1676 AddCompileTask(self, method, CompilationKind::kOsr);
1677 }
1678 return;
1679 }
1680
1681 // Check if we have precompiled this method.
1682 if (UNLIKELY(method->IsPreCompiled())) {
1683 if (!method->StillNeedsClinitCheck()) {
1684 const void* entry_point = code_cache_->GetSavedEntryPointOfPreCompiledMethod(method);
1685 if (entry_point != nullptr) {
1686 Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method, entry_point);
1687 }
1688 }
1689 return;
1690 }
1691
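// Note (added for clarity): memory-shared methods use the per-method countdown below instead of
// the regular hotness counter. A sample only falls through to the enqueue logic once the
// countdown reaches zero, i.e. roughly once every kIndividualSharedMethodHotnessThreshold + 1
// calls into this function for that method.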
1692 static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0x3f;
1693 // Intrinsics are always in the boot image and considered hot.
1694 if (method->IsMemorySharedMethod() && !method->IsIntrinsic()) {
1695 MutexLock mu(self, lock_);
1696 auto it = shared_method_counters_.find(method);
1697 if (it == shared_method_counters_.end()) {
1698 shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
1699 return;
1700 } else if (it->second != 0) {
1701 DCHECK_LE(it->second, kIndividualSharedMethodHotnessThreshold);
1702 shared_method_counters_[method] = it->second - 1;
1703 return;
1704 } else {
1705 shared_method_counters_[method] = kIndividualSharedMethodHotnessThreshold;
1706 }
1707 }
1708
1709 if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
1710 AddCompileTask(self, method, CompilationKind::kBaseline);
1711 } else {
1712 AddCompileTask(self, method, CompilationKind::kOptimized);
1713 }
1714 }
1715
1716 bool Jit::CompileMethod(ArtMethod* method,
1717 Thread* self,
1718 CompilationKind compilation_kind,
1719 bool prejit) {
1720 // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
1721 ScopedSetRuntimeThread ssrt(self);
1722 // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
1723 // conflicts with jitzygote optimizations.
1724 return CompileMethodInternal(method, self, compilation_kind, prejit);
1725 }
1726
1727 size_t JitThreadPool::GetTaskCount(Thread* self) {
1728 MutexLock mu(self, task_queue_lock_);
1729 return generic_queue_.size() +
1730 baseline_queue_.size() +
1731 optimized_queue_.size() +
1732 osr_queue_.size();
1733 }
1734
1735 void JitThreadPool::RemoveAllTasks(Thread* self) {
1736 // The ThreadPool is responsible for calling Finalize (which usually deletes
1737 // the task memory) on all the tasks.
1738 Task* task = nullptr;
1739 do {
1740 {
1741 MutexLock mu(self, task_queue_lock_);
1742 if (generic_queue_.empty()) {
1743 break;
1744 }
1745 task = generic_queue_.front();
1746 generic_queue_.pop_front();
1747 }
1748 task->Finalize();
1749 } while (true);
1750
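// Note (added for clarity): the per-kind queues below only hold raw ArtMethod pointers that the
// pool does not own, so unlike generic tasks they are simply cleared without a Finalize() step.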
1751 MutexLock mu(self, task_queue_lock_);
1752 baseline_queue_.clear();
1753 optimized_queue_.clear();
1754 osr_queue_.clear();
1755 }
1756
1757 JitThreadPool::~JitThreadPool() {
1758 DeleteThreads();
1759 RemoveAllTasks(Thread::Current());
1760 }
1761
1762 void JitThreadPool::AddTask(Thread* self, Task* task) {
1763 MutexLock mu(self, task_queue_lock_);
1764 // We don't want to enqueue any new tasks when the thread pool has stopped. This simplifies
1765 // the implementation of the redefinition feature in jvmti.
1766 if (!started_) {
1767 task->Finalize();
1768 return;
1769 }
1770 generic_queue_.push_back(task);
1771 // If we have any waiters, signal one.
1772 if (waiting_count_ != 0) {
1773 task_queue_condition_.Signal(self);
1774 }
1775 }
1776
1777 void JitThreadPool::AddTask(Thread* self, ArtMethod* method, CompilationKind kind) {
1778 MutexLock mu(self, task_queue_lock_);
1779 // We don't want to enqueue any new tasks when the thread pool has stopped. This simplifies
1780 // the implementation of the redefinition feature in jvmti.
1781 if (!started_) {
1782 return;
1783 }
1784 switch (kind) {
1785 case CompilationKind::kOsr:
1786 if (ContainsElement(osr_enqueued_methods_, method)) {
1787 return;
1788 }
1789 osr_enqueued_methods_.insert(method);
1790 osr_queue_.push_back(method);
1791 break;
1792 case CompilationKind::kBaseline:
1793 if (ContainsElement(baseline_enqueued_methods_, method)) {
1794 return;
1795 }
1796 baseline_enqueued_methods_.insert(method);
1797 baseline_queue_.push_back(method);
1798 break;
1799 case CompilationKind::kOptimized:
1800 if (ContainsElement(optimized_enqueued_methods_, method)) {
1801 return;
1802 }
1803 optimized_enqueued_methods_.insert(method);
1804 optimized_queue_.push_back(method);
1805 break;
1806 }
1807 // If we have any waiters, signal one.
1808 if (waiting_count_ != 0) {
1809 task_queue_condition_.Signal(self);
1810 }
1811 }
1812
1813 Task* JitThreadPool::TryGetTaskLocked() {
1814 if (!started_) {
1815 return nullptr;
1816 }
1817
1818 // Fetch generic tasks first.
1819 if (!generic_queue_.empty()) {
1820 Task* task = generic_queue_.front();
1821 generic_queue_.pop_front();
1822 return task;
1823 }
1824
1825 // OSR requests second, then baseline and finally optimized.
1826 Task* task = FetchFrom(osr_queue_, CompilationKind::kOsr);
1827 if (task == nullptr) {
1828 task = FetchFrom(baseline_queue_, CompilationKind::kBaseline);
1829 if (task == nullptr) {
1830 task = FetchFrom(optimized_queue_, CompilationKind::kOptimized);
1831 }
1832 }
1833 return task;
1834 }
1835
1836 Task* JitThreadPool::FetchFrom(std::deque<ArtMethod*>& methods, CompilationKind kind) {
1837 if (!methods.empty()) {
1838 ArtMethod* method = methods.front();
1839 methods.pop_front();
1840 JitCompileTask* task = new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, kind);
1841 current_compilations_.insert(task);
1842 return task;
1843 }
1844 return nullptr;
1845 }
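// Note (added for clarity): tasks created in FetchFrom() are recorded in current_compilations_
// so that VisitRoots() below can still visit methods whose compilation is in flight;
// JitThreadPool::Remove() erases the entry once the task has run.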
1846
1847 void JitThreadPool::Remove(JitCompileTask* task) {
1848 MutexLock mu(Thread::Current(), task_queue_lock_);
1849 current_compilations_.erase(task);
1850 switch (task->GetCompilationKind()) {
1851 case CompilationKind::kOsr: {
1852 osr_enqueued_methods_.erase(task->GetArtMethod());
1853 break;
1854 }
1855 case CompilationKind::kBaseline: {
1856 baseline_enqueued_methods_.erase(task->GetArtMethod());
1857 break;
1858 }
1859 case CompilationKind::kOptimized: {
1860 optimized_enqueued_methods_.erase(task->GetArtMethod());
1861 break;
1862 }
1863 }
1864 }
1865
1866 void Jit::VisitRoots(RootVisitor* visitor) {
1867 if (thread_pool_ != nullptr) {
1868 thread_pool_->VisitRoots(visitor);
1869 }
1870 }
1871
1872 void JitThreadPool::VisitRoots(RootVisitor* visitor) {
1873 if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
1874 // In case of userfaultfd compaction, ArtMethods are updated concurrently
1875 // via linear-alloc.
1876 return;
1877 }
1878 // Fetch all the ArtMethods first, to avoid holding `task_queue_lock_` for too
1879 // long.
1880 std::vector<ArtMethod*> methods;
1881 {
1882 MutexLock mu(Thread::Current(), task_queue_lock_);
1883 // We don't look at `generic_queue_` because it contains:
1884 // - Generic tasks like `ZygoteVerificationTask` which don't hold any root.
1885 // - `JitCompileTask` for precompiled methods, which we know are live, being
1886 // part of the boot classpath or system server classpath.
1887 methods.insert(methods.end(), osr_queue_.begin(), osr_queue_.end());
1888 methods.insert(methods.end(), baseline_queue_.begin(), baseline_queue_.end());
1889 methods.insert(methods.end(), optimized_queue_.begin(), optimized_queue_.end());
1890 for (JitCompileTask* task : current_compilations_) {
1891 methods.push_back(task->GetArtMethod());
1892 }
1893 }
1894 UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
1895 for (ArtMethod* method : methods) {
1896 method->VisitRoots(root_visitor, kRuntimePointerSize);
1897 }
1898 }
1899
1900 } // namespace jit
1901 } // namespace art
1902