/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "dex/dex_file.h"
#include "elf/elf_debug_reader.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_memory_region.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <type_traits>
#include <unordered_set>
#include <vector>
//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are three ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    methods, which are called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, using the entry seqlocks.
//   * The seqlock is a monotonically increasing counter, which
//     is even if the entry is valid and odd if it is invalid.
//     It is set to an even value after all other fields are set,
//     and it is set to an odd value before the entry is deleted.
//   * This makes it possible to safely read the symfile data:
//     * The reader should read the value of the seqlock both
//       before and after reading the symfile. If the seqlock
//       values match and are even, the copy is consistent.
//   * Entries are recycled, but never freed, which guarantees
//     that the seqlock is not overwritten by a random value.
//   * The linked list is one level higher. The next-pointer
//     must always point to an entry with an even seqlock, which
//     ensures that entries of a crashed process can be read.
//     This means the entry must be added after it is created
//     and it must be removed before it is invalidated (odd).
//   * When iterating over the linked list, the reader can use
//     the timestamps to ensure that the current and next entry
//     were not deleted, using the following steps:
//       1) Read the next pointer and the next entry's seqlock.
//       2) Read the symfile and re-read the next pointer.
//       3) Re-read both the current and next seqlock.
//       4) Go to step 1 using the new entry and seqlock.
//     (An illustrative reader sketch follows this comment block.)
//
// 3) Asynchronously, using the global seqlock.
//   * The seqlock is a monotonically increasing counter, which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list. If the seqlock values match and are even,
//     the copy is consistent. Otherwise, the reader should try again.
//   * Note that using the data directly while it is being modified
//     might crash the tool. Therefore, the only safe way is to make
//     a copy and to use the copy only after the seqlock has been checked.
//   * Note that the process might even free and munmap the data while
//     it is being copied, therefore the reader should either handle
//     SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The timestamps on the entries record the time when each entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later. All timestamps must be unique.
//   * For full conformance with the C++ memory model, all seqlock-protected
//     accesses should be atomic. We currently do this only in the more
//     critical cases. The rest will have to be fixed before attempting
//     to run TSAN on this code.
//
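// Illustrative reader-side sketch for option 2 above (a minimal in-process
// example, not part of ART; an out-of-process tool would read the same fields
// via SEGV handling or process_vm_readv instead of dereferencing directly):
//
//   std::vector<uint8_t> ReadSymfile(const JITCodeEntryPublic* entry) {
//     uint32_t seq1 = entry->seqlock_.load(std::memory_order_acquire);
//     if ((seq1 & 1) != 0) {
//       return {};  // Odd seqlock: the entry is invalid; skip it.
//     }
//     std::vector<uint8_t> copy(entry->symfile_addr_,
//                               entry->symfile_addr_ + entry->symfile_size_);
//     std::atomic_thread_fence(std::memory_order_acquire);
//     if (entry->seqlock_.load(std::memory_order_relaxed) != seq1) {
//       return {};  // The entry was modified mid-read; discard the copy.
//     }
//     return copy;  // Consistent snapshot of the in-memory ELF file.
//   }
//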

namespace art HIDDEN {

static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);

// Most loads and stores need no synchronization since all memory is protected by the global locks.
// Some writes are synchronized so libunwindstack can read the memory safely from another process.
constexpr std::memory_order kNonRacingRelaxed = std::memory_order_relaxed;

// Size of JIT code range covered by each packed JITCodeEntry.
constexpr uint32_t kJitRepackGroupSize = 64 * KB;

// Automatically call the repack method every 'n' new entries.
constexpr uint32_t kJitRepackFrequency = 64;

}  // namespace art

// Public binary interface between ART and native tools (gdb, libunwind, etc).
// The fields below need to be exported and have special names as per the GDB API.
namespace art EXPORT {
extern "C" {
enum JITAction {
  JIT_NOACTION = 0,
  JIT_REGISTER_FN,
  JIT_UNREGISTER_FN
};

// Public/stable binary interface.
struct JITCodeEntryPublic {
  std::atomic<const JITCodeEntry*> next_;  // Atomic to guarantee consistency after crash.
  const JITCodeEntry* prev_ = nullptr;     // For linked list deletion. Unused in readers.
  const uint8_t* symfile_addr_ = nullptr;  // Address of the in-memory ELF file.
  uint64_t symfile_size_ = 0;              // NB: The offset is 12 on x86, but 16 on ARM32.

  // Android-specific fields:
  uint64_t timestamp_;               // CLOCK_MONOTONIC time of entry registration.
  std::atomic_uint32_t seqlock_{1};  // Synchronization. Even value if entry is valid.
};

// Implementation-specific fields (which can be used only in this file).
struct JITCodeEntry : public JITCodeEntryPublic {
  // Unpacked entries: Code address of the symbol in the ELF file.
  // Packed entries: The start address of the covered memory range.
  const void* addr_ = nullptr;
  // Allow merging of ELF files to save space.
  // Packing drops advanced DWARF data, so it is not always desirable.
  bool allow_packing_ = false;
  // Whether this entry has been LZMA compressed.
  // Compression is expensive, so we don't always do it.
  bool is_compressed_ = false;
};

// Public/stable binary interface.
struct JITDescriptorPublic {
  uint32_t version_ = 1;                            // NB: GDB supports only version 1.
  uint32_t action_flag_ = JIT_NOACTION;             // One of the JITAction enum values.
  const JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
  std::atomic<const JITCodeEntry*> head_{nullptr};  // Head of linked list of all entries.

  // Android-specific fields:
  uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '2'};
  uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
  uint32_t sizeof_descriptor = sizeof(JITDescriptorPublic);
  uint32_t sizeof_entry = sizeof(JITCodeEntryPublic);
  std::atomic_uint32_t seqlock_{0};  // Incremented before and after any modification.
  uint64_t timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
};

// Implementation-specific fields (which can be used only in this file).
struct JITDescriptor : public JITDescriptorPublic {
  const JITCodeEntry* tail_ = nullptr;          // Tail of linked list of all live entries.
  const JITCodeEntry* free_entries_ = nullptr;  // List of deleted entries ready for reuse.

  // Used for memory sharing with zygote. See NativeDebugInfoPreFork().
  const JITCodeEntry* zygote_head_entry_ = nullptr;
  JITCodeEntry application_tail_entry_{};
};

// Public interface: Can be used by readers to check that the structs have the expected size.
uint32_t g_art_sizeof_jit_code_entry = sizeof(JITCodeEntryPublic);
uint32_t g_art_sizeof_jit_descriptor = sizeof(JITDescriptorPublic);

// Check that std::atomic has the expected layout.
static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
static_assert(std::atomic_uint32_t::is_always_lock_free, "Expected to be lock free");
static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");
static_assert(std::atomic<void*>::is_always_lock_free, "Expected to be lock free");

// GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
void __attribute__((noinline)) __jit_debug_register_code() {
  __asm__("");
}
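// GDB hooks this function automatically through its JIT interface; a custom
// tool could do the equivalent by hand (an illustrative session, not an ART
// requirement):
//   (gdb) break __jit_debug_register_code
// and re-read __jit_debug_descriptor each time the breakpoint is hit.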

// Alternatively, native tools may overwrite this field to execute a custom handler.
void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

// The root data structure describing all JITed methods.
JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};

// The following globals mirror the ones above, but are used to register dex files.
void __attribute__((noinline)) __dex_debug_register_code() {
  __asm__("");
}
void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
}  // extern "C"
}  // namespace art

namespace art HIDDEN {

// The fields below are internal, but we keep them here anyway for consistency.
// Their state is related to the static state above and it must be kept in sync.

// Used only in debug builds to check that we are not adding duplicate entries.
static std::unordered_set<const void*> g_dcheck_all_jit_functions GUARDED_BY(g_jit_debug_lock);

// Methods that have been marked for deletion on the next repack pass.
static std::vector<const void*> g_removed_jit_functions GUARDED_BY(g_jit_debug_lock);

// Number of small (single symbol) ELF files. Used to trigger repacking.
static uint32_t g_jit_num_unpacked_entries = 0;

struct DexNativeInfo {
  static Mutex* Lock() RETURN_CAPABILITY(g_dex_debug_lock) { return &g_dex_debug_lock; }
  static constexpr bool kCopySymfileData = false;  // Just reference DEX files.
  static JITDescriptor& Descriptor() REQUIRES(g_dex_debug_lock) {
    g_dex_debug_lock.AssertHeld(Thread::Current());
    return __dex_debug_descriptor;
  }
  static void NotifyNativeDebugger() { __dex_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return malloc(size); }
  static void Free(const void* ptr) { free(const_cast<void*>(ptr)); }
  template<class T> static T* Writable(const T* v) { return const_cast<T*>(v); }
};

struct JitNativeInfo {
  static Mutex* Lock() RETURN_CAPABILITY(g_jit_debug_lock) { return &g_jit_debug_lock; }
  static constexpr bool kCopySymfileData = true;  // Copy debug info to JIT memory.
  static JITDescriptor& Descriptor() REQUIRES(g_jit_debug_lock) {
    g_jit_debug_lock.AssertHeld(Thread::Current());
    return __jit_debug_descriptor;
  }
  static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
  static void Free(void* ptr) = delete;

  template <class T>
  static T* Writable(const T* v) REQUIRES(g_jit_debug_lock) {
    // Special case: This entry is in static memory and not allocated in JIT memory.
    if (v == reinterpret_cast<const void*>(&Descriptor().application_tail_entry_)) {
      return const_cast<T*>(v);
    }
    return const_cast<T*>(Memory()->GetWritableDataAddress(v));
  }

  static jit::JitMemoryRegion* Memory() ASSERT_CAPABILITY(Locks::jit_lock_) {
    Locks::jit_lock_->AssertHeld(Thread::Current());
    jit::JitCodeCache* jit_code_cache = Runtime::Current()->GetJitCodeCache();
    CHECK(jit_code_cache != nullptr);
    jit::JitMemoryRegion* memory = jit_code_cache->GetCurrentRegion();
    CHECK(memory->IsValid());
    return memory;
  }
};

ArrayRef<const uint8_t> GetJITCodeEntrySymFile(const JITCodeEntry* entry) {
  return ArrayRef<const uint8_t>(entry->symfile_addr_, entry->symfile_size_);
}

// Ensure the timestamp is monotonically increasing even in the presence of a
// low granularity system timer. This ensures each entry has a unique timestamp.
static uint64_t GetNextTimestamp(JITDescriptor& descriptor) {
  return std::max(descriptor.timestamp_ + 1, NanoTime());
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void Seqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Already locked";
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void Sequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
}
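
// Illustrative reader-side counterpart of the global seqlock above (a minimal
// sketch, not part of ART). 'CopyLinkedList' is a hypothetical stand-in for
// whatever copying mechanism the tool uses (e.g. process_vm_readv):
//
//   bool TryCopyEntries(const JITDescriptorPublic* descriptor,
//                       std::vector<JITCodeEntryPublic>* out) {
//     uint32_t seq1 = descriptor->seqlock_.load(std::memory_order_acquire);
//     if ((seq1 & 1) != 0) {
//       return false;  // Odd value: the list is being modified; retry later.
//     }
//     CopyLinkedList(descriptor->head_.load(std::memory_order_acquire), out);
//     std::atomic_thread_fence(std::memory_order_acquire);
//     // The copy is consistent only if the seqlock did not change meanwhile.
//     return descriptor->seqlock_.load(std::memory_order_relaxed) == seq1;
//   }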

// Insert 'entry' in the linked list before 'next' and mark it as valid (append if 'next' is null).
// This method must be called under the global lock (g_jit_debug_lock or g_dex_debug_lock).
template <class NativeInfo>
static void InsertNewEntry(const JITCodeEntry* entry, const JITCodeEntry* next)
    REQUIRES(NativeInfo::Lock()) {
  CHECK_EQ(entry->seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Expected invalid entry";
  JITDescriptor& descriptor = NativeInfo::Descriptor();
  const JITCodeEntry* prev = (next != nullptr ? next->prev_ : descriptor.tail_);
  JITCodeEntry* writable = NativeInfo::Writable(entry);
  writable->next_ = next;
  writable->prev_ = prev;
  writable->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as valid.
  // Backward pointers should not be used by readers, so they are non-atomic.
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = entry;
  } else {
    descriptor.tail_ = entry;
  }
  // Forward pointers must be atomic and they must point to a valid entry at all times.
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(entry, std::memory_order_release);
  } else {
    descriptor.head_.store(entry, std::memory_order_release);
  }
}

// This must be called with the appropriate lock taken (g_{jit,dex}_debug_lock).
template <class NativeInfo>
static const JITCodeEntry* CreateJITCodeEntryInternal(
    ArrayRef<const uint8_t> symfile = ArrayRef<const uint8_t>(),
    const void* addr = nullptr,
    bool allow_packing = false,
    bool is_compressed = false) REQUIRES(NativeInfo::Lock()) {
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Allocate a JITCodeEntry if needed.
  if (descriptor.free_entries_ == nullptr) {
    const void* memory = NativeInfo::Alloc(sizeof(JITCodeEntry));
    if (memory == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    new (NativeInfo::Writable(memory)) JITCodeEntry();
    descriptor.free_entries_ = reinterpret_cast<const JITCodeEntry*>(memory);
  }

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  if (NativeInfo::kCopySymfileData && !symfile.empty()) {
    const uint8_t* copy = reinterpret_cast<const uint8_t*>(NativeInfo::Alloc(symfile.size()));
    if (copy == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    memcpy(NativeInfo::Writable(copy), symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  uint64_t timestamp = GetNextTimestamp(descriptor);

  // We must insert entries at a specific place. See NativeDebugInfoPreFork().
  const JITCodeEntry* next = descriptor.head_.load(kNonRacingRelaxed);  // Insert at the head.
  if (descriptor.zygote_head_entry_ != nullptr && Runtime::Current()->IsZygote()) {
    next = nullptr;  // Insert zygote entries at the tail.
  }

  // Pop an entry from the free list.
  const JITCodeEntry* entry = descriptor.free_entries_;
  descriptor.free_entries_ = descriptor.free_entries_->next_.load(kNonRacingRelaxed);

  // Create the entry and set all its fields.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  writable_entry->symfile_addr_ = symfile.data();
  writable_entry->symfile_size_ = symfile.size();
  writable_entry->addr_ = addr;
  writable_entry->allow_packing_ = allow_packing;
  writable_entry->is_compressed_ = is_compressed;
  writable_entry->timestamp_ = timestamp;

  // Add the entry to the main linked list.
  Seqlock(descriptor);
  InsertNewEntry<NativeInfo>(entry, next);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.timestamp_ = timestamp;
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  return entry;
}

template <class NativeInfo>
static void DeleteJITCodeEntryInternal(const JITCodeEntry* entry) REQUIRES(NativeInfo::Lock()) {
  CHECK(entry != nullptr);
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Remove the entry from the main linked list.
  Seqlock(descriptor);
  const JITCodeEntry* next = entry->next_.load(kNonRacingRelaxed);
  const JITCodeEntry* prev = entry->prev_;
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = prev;
  } else {
    descriptor.tail_ = prev;
  }
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.timestamp_ = GetNextTimestamp(descriptor);
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  // Delete the entry.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  CHECK_EQ(writable_entry->seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Expected valid entry";
  // Release: Ensures that "next_" points to a valid entry at any time in readers.
  writable_entry->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as invalid.
  // Release: Ensures that the entry is seen as invalid before its data is freed.
  std::atomic_thread_fence(std::memory_order_release);
  const uint8_t* symfile = entry->symfile_addr_;
  writable_entry->symfile_addr_ = nullptr;
  if (NativeInfo::kCopySymfileData && symfile != nullptr) {
    NativeInfo::Free(symfile);
  }

  // Push the entry onto the free list.
  writable_entry->next_.store(descriptor.free_entries_, kNonRacingRelaxed);
  writable_entry->prev_ = nullptr;
  descriptor.free_entries_ = entry;
}

void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // Container dex files (v41) may store data past the size defined in the header.
  uint32_t size = dexfile->SizeIncludingSharedData();
  if (dexfile->IsCompactDexFile()) {
    // Compact dex files may store data past the size defined in the header.
    const DexFile::Header& header = dexfile->GetHeader();
    size = std::max(size, header.data_off_ + header.data_size_);
  }
  const ArrayRef<const uint8_t> symfile(dexfile->Begin(), size);
  CreateJITCodeEntryInternal<DexNativeInfo>(symfile);
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  // On the other hand, a single dex file might also be used with different class-loaders.
  for (const JITCodeEntry* entry = __dex_debug_descriptor.head_; entry != nullptr; ) {
    const JITCodeEntry* next = entry->next_;  // Save the next pointer before we free the memory.
    if (entry->symfile_addr_ == dexfile->Begin()) {
      DeleteJITCodeEntryInternal<DexNativeInfo>(entry);
    }
    entry = next;
  }
}

// Splits the linked list into two parts:
// The first part (including the static head pointer) is owned by the application.
// The second part is owned by the zygote and might be concurrently modified by it.
//
// We add two empty entries at the boundary which are never removed (app_tail, zygote_head).
// These entries are needed to preserve the next/prev pointers in the linked list,
// since the zygote cannot modify the application's data and vice versa.
//
//   <------- owned by the application memory --------> <--- owned by zygote memory --->
//           |----------------------|------------------|-------------|-----------------|
//   head -> | application_entries* | application_tail | zygote_head | zygote_entries* |
//           |+---------------------|------------------|-------------|----------------+|
//            |                                                                       |
//            \-(new application entries)                        (new zygote entries)-/
//
// Zygote entries are inserted at the end, which means that repacked zygote entries
// will still be seen by a single forward iteration of the linked list (avoiding a race).
//
// Application entries are inserted at the start, which introduces a repacking race,
// but that is ok, since it is easy to read the new entries from the head in a further pass.
// The benefit is that this makes it fast to read only the new entries
// (see the illustrative sketch below).
//
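// An illustrative sketch of that incremental read (not part of ART; it assumes
// a hypothetical 'last_seen_timestamp' saved by the tool on its previous pass
// and a hypothetical 'ProcessNewEntry' callback). Timestamps grow monotonically
// and new application entries are prepended, so unseen entries form a prefix:
//
//   const JITCodeEntry* it = __jit_debug_descriptor.head_.load(std::memory_order_acquire);
//   for (; it != nullptr && it->timestamp_ > last_seen_timestamp; it = it->next_) {
//     ProcessNewEntry(it);  // Hypothetical: record or unwind the new entry.
//   }
//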
void NativeDebugInfoPreFork() {
  CHECK(Runtime::Current()->IsZygote());
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);  // Needed to alloc entry.
  MutexLock mu2(Thread::Current(), g_jit_debug_lock);
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  if (descriptor.zygote_head_entry_ != nullptr) {
    return;  // Already done - we need to do this only on the first fork.
  }

  // Create the zygote-owned head entry (with no ELF file).
  // The data will be allocated from the current JIT memory (owned by zygote).
  const JITCodeEntry* zygote_head =
      reinterpret_cast<const JITCodeEntry*>(JitNativeInfo::Alloc(sizeof(JITCodeEntry)));
  CHECK(zygote_head != nullptr);
  new (JitNativeInfo::Writable(zygote_head)) JITCodeEntry();  // Initialize.
  InsertNewEntry<JitNativeInfo>(zygote_head, descriptor.head_);
  descriptor.zygote_head_entry_ = zygote_head;

  // Create the child-owned tail entry (with no ELF file).
  // The data is statically allocated since it must be owned by the forked process.
  InsertNewEntry<JitNativeInfo>(&descriptor.application_tail_entry_, descriptor.head_);
}

void NativeDebugInfoPostFork() {
  CHECK(!Runtime::Current()->IsZygote());
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  descriptor.free_entries_ = nullptr;  // Don't reuse zygote's entries.
}

// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry per group.
// The start address of a method's code determines which group it belongs to. The end is irrelevant.
// New mini debug infos will be merged if possible, and entries for GCed functions will be removed.
static void RepackEntries(bool compress_entries, ArrayRef<const void*> removed)
    REQUIRES(g_jit_debug_lock) {
  DCHECK(std::is_sorted(removed.begin(), removed.end()));
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return;
  }
  JITDescriptor& descriptor = __jit_debug_descriptor;
  bool is_zygote = Runtime::Current()->IsZygote();

  // Collect entries that we want to pack.
  std::vector<const JITCodeEntry*> entries;
  entries.reserve(2 * kJitRepackFrequency);
  for (const JITCodeEntry* it = descriptor.head_; it != nullptr; it = it->next_) {
    if (it == descriptor.zygote_head_entry_ && !is_zygote) {
      break;  // Memory owned by the zygote process (read-only for an app).
    }
    if (it->allow_packing_) {
      if (!compress_entries && it->is_compressed_ && removed.empty()) {
        continue;  // If we are not compressing, also avoid decompressing.
      }
      entries.push_back(it);
    }
  }
  auto cmp = [](const JITCodeEntry* l, const JITCodeEntry* r) { return l->addr_ < r->addr_; };
  std::sort(entries.begin(), entries.end(), cmp);  // Sort by address.

  // Process the entries in groups (each spanning a memory range of size kJitRepackGroupSize).
  for (auto group_it = entries.begin(); group_it != entries.end();) {
    const void* group_ptr = AlignDown((*group_it)->addr_, kJitRepackGroupSize);
    const void* group_end = reinterpret_cast<const uint8_t*>(group_ptr) + kJitRepackGroupSize;

    // Find all entries in this group (each entry is an in-memory ELF file).
    auto begin = group_it;
    auto end = std::find_if(begin, entries.end(), [=](auto* e) { return e->addr_ >= group_end; });
    CHECK(end > begin);
    ArrayRef<const JITCodeEntry*> elfs(&*begin, end - begin);

    // Find all symbols that have been removed in this memory range.
    auto removed_begin = std::lower_bound(removed.begin(), removed.end(), group_ptr);
    auto removed_end = std::lower_bound(removed.begin(), removed.end(), group_end);
    CHECK(removed_end >= removed_begin);
    ArrayRef<const void*> removed_subset(&*removed_begin, removed_end - removed_begin);

    // Optimization: Don't compress the last group since it will likely change again soon.
    bool compress = compress_entries && end != entries.end();

    // Bail out early if there is nothing to do for this group.
    if (elfs.size() == 1 && removed_subset.empty() && (*begin)->is_compressed_ == compress) {
      group_it = end;  // Go to the next group.
      continue;
    }

    // Create a new single JITCodeEntry that covers this memory range.
    uint64_t start_time = MicroTime();
    size_t live_symbols;
    std::vector<uint8_t> packed = jit->GetJitCompiler()->PackElfFileForJIT(
        elfs, removed_subset, compress, &live_symbols);
    VLOG(jit)
        << "JIT mini-debug-info repacked"
        << " for " << group_ptr
        << " in " << MicroTime() - start_time << "us"
        << " elfs=" << elfs.size()
        << " dead=" << removed_subset.size()
        << " live=" << live_symbols
        << " size=" << packed.size() << (compress ? "(lzma)" : "");

    // Replace the old entries with the new one (with their lifetimes temporarily overlapping).
    CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(packed),
                                              /*addr_=*/ group_ptr,
                                              /*allow_packing_=*/ true,
                                              /*is_compressed_=*/ compress);
    for (auto it : elfs) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    group_it = end;  // Go to the next group.
  }
  g_jit_num_unpacked_entries = 0;
}

static void RepackNativeDebugInfoForJitLocked() REQUIRES(g_jit_debug_lock);

void AddNativeDebugInfoForJit(const void* code_ptr,
                              const std::vector<uint8_t>& symfile,
                              bool allow_packing) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  DCHECK_NE(symfile.size(), 0u);
  if (kIsDebugBuild && code_ptr != nullptr) {
    DCHECK(g_dcheck_all_jit_functions.insert(code_ptr).second) << code_ptr << " already added";
  }

  // Remove all methods which have been marked for removal. The JIT GC should
  // force a repack, so this should happen only rarely for various corner cases.
  // Must be done before the addition in case the added code_ptr is in the removed set.
  if (!g_removed_jit_functions.empty()) {
    RepackNativeDebugInfoForJitLocked();
  }

  CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(symfile),
                                            /*addr=*/ code_ptr,
                                            /*allow_packing=*/ allow_packing,
                                            /*is_compressed=*/ false);

  if (code_ptr == nullptr) {
    VLOG(jit) << "JIT mini-debug-info added for new type, size=" << PrettySize(symfile.size());
  } else {
    VLOG(jit)
        << "JIT mini-debug-info added for native code at " << code_ptr
        << ", size=" << PrettySize(symfile.size());
  }

  // Automatically repack entries on a regular basis to save space.
  // Pack (but don't compress) recent entries - this is cheap and reduces memory use by ~4x.
  // We delay compression until after GC since it is more expensive (and saves a further ~4x).
  // Always compress in the zygote, since it does not GC and we want a low high-water mark.
  if (++g_jit_num_unpacked_entries >= kJitRepackFrequency) {
    bool is_zygote = Runtime::Current()->IsZygote();
    RepackEntries(/*compress_entries=*/ is_zygote, /*removed=*/ ArrayRef<const void*>());
  }
}

void RemoveNativeDebugInfoForJit(const void* code_ptr) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  g_dcheck_all_jit_functions.erase(code_ptr);

  // Method removal is very expensive since we need to decompress and read ELF files.
  // Collect the methods to be removed and do the removal in bulk later.
  g_removed_jit_functions.push_back(code_ptr);

  VLOG(jit) << "JIT mini-debug-info removed for " << code_ptr;
}

static void RepackNativeDebugInfoForJitLocked() {
  // Remove entries which are inside packed and compressed ELF files.
  std::vector<const void*>& removed = g_removed_jit_functions;
  std::sort(removed.begin(), removed.end());
  RepackEntries(/*compress_entries=*/ true, ArrayRef<const void*>(removed));

  // Remove entries which are not allowed to be packed (each contains a single method).
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr;) {
    const JITCodeEntry* next = it->next_;
    if (!it->allow_packing_ && std::binary_search(removed.begin(), removed.end(), it->addr_)) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    it = next;
  }

  removed.clear();
  removed.shrink_to_fit();
}

void RepackNativeDebugInfoForJit() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  RepackNativeDebugInfoForJitLocked();
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  size_t size = 0;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    size += sizeof(JITCodeEntry) + it->symfile_size_;
  }
  return size;
}

Mutex* GetNativeDebugInfoLock() {
  return &g_jit_debug_lock;
}

void ForEachNativeDebugSymbol(std::function<void(const void*, size_t, const char*)> cb) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
  const JITCodeEntry* end = __jit_debug_descriptor.zygote_head_entry_;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != end; it = it->next_) {
    ArrayRef<const uint8_t> buffer(it->symfile_addr_, it->symfile_size_);
    if (!buffer.empty()) {
      ElfDebugReader<ElfRuntimeTypes> reader(buffer);
      reader.VisitFunctionSymbols([&](ElfRuntimeTypes::Sym sym, const char* name) {
        cb(reinterpret_cast<const void*>(sym.st_value), sym.st_size, name);
      });
    }
  }
}

}  // namespace art