1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <errno.h>
30 #include <inttypes.h>
31 #include <malloc.h>
32 #include <pthread.h>
33 #include <signal.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/cdefs.h>
38 #include <sys/param.h>
39 #include <sys/syscall.h>
40 #include <unistd.h>
41
42 #include <mutex>
43 #include <vector>
44
45 #include <android-base/file.h>
46 #include <android-base/properties.h>
47 #include <android-base/stringprintf.h>
48 #include <bionic/malloc_tagged_pointers.h>
49 #include <platform/bionic/reserved_signals.h>
50 #include <private/MallocXmlElem.h>
51 #include <private/bionic_malloc_dispatch.h>
52 #include <unwindstack/Unwinder.h>
53
54 #include "Config.h"
55 #include "DebugData.h"
56 #include "LogAllocatorStats.h"
57 #include "Nanotime.h"
58 #include "Unreachable.h"
59 #include "UnwindBacktrace.h"
60 #include "backtrace.h"
61 #include "debug_disable.h"
62 #include "debug_log.h"
63 #include "malloc_debug.h"
64
// ------------------------------------------------------------------------
// Global Data
// ------------------------------------------------------------------------
// All malloc debug state: parsed config, pointer tracking, record data.
DebugData* g_debug;

// Points at libc's flag indicating whether this process is a zygote child.
bool* g_zygote_child;

// The real (unhooked) allocator entry points; every operation ultimately
// dispatches through this table.
const MallocDispatch* g_dispatch;
73
namespace {
// A TimedResult holds the return value of a malloc et al. function together
// with the start/end timestamps of the call (used for RECORD_ALLOCS tracing).
struct TimedResult {
  uint64_t start_ns = 0;
  uint64_t end_ns = 0;
  // The dispatched functions return size_t, int, or void*; whichever one
  // applies is stored in this union and accessed via the typed get/set below.
  union {
    size_t s;
    int i;
    void* p;
  } v;

  uint64_t GetStartTimeNS() const { return start_ns; }
  uint64_t GetEndTimeNS() const { return end_ns; }
  void SetStartTimeNS(uint64_t t) { start_ns = t; }
  void SetEndTimeNS(uint64_t t) { end_ns = t; }

  // Typed setters for the union; only the three types returned by the
  // dispatch table are specialized.
  template <typename T>
  void setValue(T);
  template <>
  void setValue(size_t s) {
    v.s = s;
  }
  template <>
  void setValue(int i) {
    v.i = i;
  }
  template <>
  void setValue(void* p) {
    v.p = p;
  }

  // Typed getters for the union; must match the type previously stored.
  template <typename T>
  T getValue() const;
  template <>
  size_t getValue<size_t>() const {
    return v.s;
  }
  template <>
  int getValue<int>() const {
    return v.i;
  }
  template <>
  void* getValue<void*>() const {
    return v.p;
  }
};

// RAII timer: writes Nanotime() into the TimedResult at construction (start)
// and at destruction (end).
class ScopedTimer {
 public:
  ScopedTimer(TimedResult& res) : res_(res) { res_.start_ns = Nanotime(); }

  ~ScopedTimer() { res_.end_ns = Nanotime(); }

 private:
  TimedResult& res_;
};

}  // namespace
133
// Invokes the given MallocDispatch member function with the supplied
// arguments, timing the call only when allocation recording (RECORD_ALLOCS)
// is enabled, and returns the function's result plus timestamps.
template <typename MallocFn, typename... Args>
static TimedResult TimerCall(MallocFn fn, Args... args) {
  TimedResult ret;
  decltype((g_dispatch->*fn)(args...)) r;
  if (g_debug->config().options() & RECORD_ALLOCS) {
    ScopedTimer t(ret);
    r = (g_dispatch->*fn)(args...);
  } else {
    // Not recording: skip the Nanotime() overhead entirely.
    r = (g_dispatch->*fn)(args...);
  }
  ret.setValue<decltype(r)>(r);
  return ret;
}
147
// Like TimerCall, but for dispatch functions that return void (i.e. free).
// Note: unlike TimerCall, this always times the call regardless of whether
// RECORD_ALLOCS is set.
template <typename MallocFn, typename... Args>
static TimedResult TimerCallVoid(MallocFn fn, Args... args) {
  TimedResult ret;
  {
    ScopedTimer t(ret);
    (g_dispatch->*fn)(args...);
  }
  return ret;
}
157
// Convenience wrappers that name the MallocDispatch member to call.
// Note: no trailing semicolon in the expansion — call sites supply their own.
// (The previous definitions embedded a ';', which produced empty statements
// at every use and made the macros unusable inside expressions.)
#define TCALL(FUNC, ...) TimerCall(&MallocDispatch::FUNC, __VA_ARGS__)
#define TCALLVOID(FUNC, ...) TimerCallVoid(&MallocDispatch::FUNC, __VA_ARGS__)
160
161 // ------------------------------------------------------------------------
162
163 // ------------------------------------------------------------------------
164 // Use C style prototypes for all exported functions. This makes it easy
165 // to do dlsym lookups during libc initialization when malloc debug
166 // is enabled.
167 // ------------------------------------------------------------------------
__BEGIN_DECLS

// Lifecycle of the malloc debug layer.
bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* malloc_zygote_child,
                      const char* options);
void debug_finalize();

// Leak/heap reporting entry points.
void debug_dump_heap(const char* file_name);
void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
                                size_t* total_memory, size_t* backtrace_size);
bool debug_write_malloc_leak_info(FILE* fp);
ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
void debug_free_malloc_leak_info(uint8_t* info);

// Replacements for the standard allocator entry points.
size_t debug_malloc_usable_size(void* pointer);
void* debug_malloc(size_t size);
void debug_free(void* pointer);
void* debug_aligned_alloc(size_t alignment, size_t size);
void* debug_memalign(size_t alignment, size_t bytes);
void* debug_realloc(void* pointer, size_t bytes);
void* debug_calloc(size_t nmemb, size_t bytes);
struct mallinfo debug_mallinfo();
int debug_mallopt(int param, int value);
int debug_malloc_info(int options, FILE* fp);
int debug_posix_memalign(void** memptr, size_t alignment, size_t size);
int debug_malloc_iterate(uintptr_t base, size_t size,
                         void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
void debug_malloc_disable();
void debug_malloc_enable();

// Deprecated entry points, only exported on platforms that still provide them.
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* debug_pvalloc(size_t bytes);
void* debug_valloc(size_t size);
#endif

__END_DECLS
201 // ------------------------------------------------------------------------
202
// Reader/writer lock that lets normal malloc debug operations run
// concurrently (each takes the read lock) while debug_finalize can stop the
// world by taking the write lock via BlockAllOperations().
class ScopedConcurrentLock {
 public:
  ScopedConcurrentLock() {
    pthread_rwlock_rdlock(&lock_);
  }
  ~ScopedConcurrentLock() {
    pthread_rwlock_unlock(&lock_);
  }

  static void Init() {
    pthread_rwlockattr_t attr;
    // The attribute object must be initialized before use (POSIX requires
    // this; the previous code passed an uninitialized attr to setkind).
    pthread_rwlockattr_init(&attr);
    // Set the attribute so that when a write lock is pending, read locks are no
    // longer granted.
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    pthread_rwlock_init(&lock_, &attr);
    pthread_rwlockattr_destroy(&attr);
  }

  // Acquires the write lock and intentionally never releases it: every
  // subsequent operation (which needs the read lock) blocks forever.
  static void BlockAllOperations() {
    pthread_rwlock_wrlock(&lock_);
  }

 private:
  static pthread_rwlock_t lock_;
};
pthread_rwlock_t ScopedConcurrentLock::lock_;
228
// Use this because the sigprocmask* functions filter out the reserved bionic
// signals including the signal this code blocks.
// Issues the rt_sigprocmask syscall directly, bypassing libc's filtering.
static inline int __rt_sigprocmask(int how, const sigset64_t* new_set, sigset64_t* old_set,
                                   size_t sigset_size) {
  return syscall(SYS_rt_sigprocmask, how, new_set, old_set, sigset_size);
}
235
// Need to block the backtrace signal while in malloc debug routines
// otherwise there is a chance of a deadlock and timeout when unwinding.
// This can occur if a thread is paused while owning a malloc debug
// internal lock.
class ScopedBacktraceSignalBlocker {
 public:
  ScopedBacktraceSignalBlocker() {
    sigemptyset64(&backtrace_set_);
    sigaddset64(&backtrace_set_, BIONIC_SIGNAL_BACKTRACE);
    sigset64_t old_set;
    __rt_sigprocmask(SIG_BLOCK, &backtrace_set_, &old_set, sizeof(backtrace_set_));
    // If the signal was already blocked before this scope, leave it blocked
    // on destruction so we don't clobber the outer scope's signal mask.
    if (sigismember64(&old_set, BIONIC_SIGNAL_BACKTRACE)) {
      unblock_ = false;
    }
  }

  ~ScopedBacktraceSignalBlocker() {
    if (unblock_) {
      __rt_sigprocmask(SIG_UNBLOCK, &backtrace_set_, nullptr, sizeof(backtrace_set_));
    }
  }

 private:
  bool unblock_ = true;  // Whether the destructor should unblock the signal.
  sigset64_t backtrace_set_;
};
262
InitAtfork()263 static void InitAtfork() {
264 static pthread_once_t atfork_init = PTHREAD_ONCE_INIT;
265 pthread_once(&atfork_init, []() {
266 pthread_atfork(
267 []() {
268 if (g_debug != nullptr) {
269 g_debug->PrepareFork();
270 }
271 },
272 []() {
273 if (g_debug != nullptr) {
274 g_debug->PostForkParent();
275 }
276 },
277 []() {
278 if (g_debug != nullptr) {
279 g_debug->PostForkChild();
280 }
281 });
282 });
283 }
284
BacktraceAndLog()285 void BacktraceAndLog() {
286 if (g_debug->config().options() & BACKTRACE_FULL) {
287 std::vector<uintptr_t> frames;
288 std::vector<unwindstack::FrameData> frames_info;
289 if (!Unwind(&frames, &frames_info, 256)) {
290 error_log(" Backtrace failed to get any frames.");
291 } else {
292 UnwindLog(frames_info);
293 }
294 } else {
295 std::vector<uintptr_t> frames(256);
296 size_t num_frames = backtrace_get(frames.data(), frames.size());
297 if (num_frames == 0) {
298 error_log(" Backtrace failed to get any frames.");
299 } else {
300 backtrace_log(frames.data(), num_frames);
301 }
302 }
303 }
304
// Logs an allocation error (bad tag, unknown pointer, ...) with the current
// backtrace, and aborts when abort_on_error is configured.
static void LogError(const void* pointer, const char* error_str) {
  error_log(LOG_DIVIDER);
  error_log("+++ ALLOCATION %p %s", pointer, error_str);

  // If we are tracking already freed pointers, check to see if this is
  // one so we can print extra information.
  if (g_debug->config().options() & FREE_TRACK) {
    PointerData::LogFreeBacktrace(pointer);
  }

  error_log("Backtrace at time of failure:");
  BacktraceAndLog();
  error_log(LOG_DIVIDER);
  if (g_debug->config().options() & ABORT_ON_ERROR) {
    abort();
  }
}
322
VerifyPointer(const void * pointer,const char * function_name)323 static bool VerifyPointer(const void* pointer, const char* function_name) {
324 if (g_debug->HeaderEnabled()) {
325 Header* header = g_debug->GetHeader(pointer);
326 if (header->tag != DEBUG_TAG) {
327 std::string error_str;
328 if (header->tag == DEBUG_FREE_TAG) {
329 error_str = std::string("USED AFTER FREE (") + function_name + ")";
330 } else {
331 error_str = android::base::StringPrintf("HAS INVALID TAG %" PRIx32 " (%s)", header->tag,
332 function_name);
333 }
334 LogError(pointer, error_str.c_str());
335 return false;
336 }
337 }
338
339 if (g_debug->TrackPointers()) {
340 if (!PointerData::Exists(pointer)) {
341 std::string error_str(std::string("UNKNOWN POINTER (") + function_name + ")");
342 LogError(pointer, error_str.c_str());
343 return false;
344 }
345 }
346 return true;
347 }
348
InternalMallocUsableSize(void * pointer)349 static size_t InternalMallocUsableSize(void* pointer) {
350 if (g_debug->HeaderEnabled()) {
351 return g_debug->GetHeader(pointer)->usable_size;
352 } else {
353 return g_dispatch->malloc_usable_size(pointer);
354 }
355 }
356
// Fills in the debug header for a freshly allocated block and paints the
// front/rear guard regions when enabled. `orig_pointer` is the pointer the
// real allocator returned; the return value is the user-visible pointer
// (past the header/front guard). Returns nullptr (after freeing the block)
// if the allocator reports a zero usable size.
static void* InitHeader(Header* header, void* orig_pointer, size_t size) {
  header->tag = DEBUG_TAG;
  header->orig_pointer = orig_pointer;
  header->size = size;
  header->usable_size = g_dispatch->malloc_usable_size(orig_pointer);
  if (header->usable_size == 0) {
    g_dispatch->free(orig_pointer);
    return nullptr;
  }
  // Compute the usable size seen by the caller: subtract the header/guard
  // prefix that sits between orig_pointer and the user pointer.
  header->usable_size -= g_debug->pointer_offset() + reinterpret_cast<uintptr_t>(header) -
                         reinterpret_cast<uintptr_t>(orig_pointer);

  if (g_debug->config().options() & FRONT_GUARD) {
    uint8_t* guard = g_debug->GetFrontGuard(header);
    memset(guard, g_debug->config().front_guard_value(), g_debug->config().front_guard_bytes());
  }

  if (g_debug->config().options() & REAR_GUARD) {
    uint8_t* guard = g_debug->GetRearGuard(header);
    memset(guard, g_debug->config().rear_guard_value(), g_debug->config().rear_guard_bytes());
    // If the rear guard is enabled, set the usable size to the exact size
    // of the allocation.
    header->usable_size = header->size;
  }

  return g_debug->GetPointer(header);
}
384
// Weak reference to ASAN's initializer; non-null only when ASAN is linked in.
extern "C" void __asan_init() __attribute__((weak));
386
debug_initialize(const MallocDispatch * malloc_dispatch,bool * zygote_child,const char * options)387 bool debug_initialize(const MallocDispatch* malloc_dispatch, bool* zygote_child,
388 const char* options) {
389 if (zygote_child == nullptr || options == nullptr) {
390 return false;
391 }
392
393 if (__asan_init != 0) {
394 error_log("malloc debug cannot be enabled alongside ASAN");
395 return false;
396 }
397
398 InitAtfork();
399
400 g_zygote_child = zygote_child;
401
402 g_dispatch = malloc_dispatch;
403
404 if (!DebugDisableInitialize()) {
405 return false;
406 }
407
408 DebugData* debug = new DebugData();
409 if (!debug->Initialize(options) || !Unreachable::Initialize(debug->config())) {
410 delete debug;
411 DebugDisableFinalize();
412 return false;
413 }
414 g_debug = debug;
415
416 // Always enable the backtrace code since we will use it in a number
417 // of different error cases.
418 backtrace_startup();
419
420 if (g_debug->config().options() & VERBOSE) {
421 info_log("%s: malloc debug enabled", getprogname());
422 }
423
424 ScopedConcurrentLock::Init();
425
426 return true;
427 }
428
// Tears down malloc debug at process exit: blocks all other operations,
// runs the configured exit-time reports (free verification, leak logging,
// record dump, heap dump, allocator stats), then shuts down the backtracer.
// g_debug is deliberately leaked (see the comment at the end).
void debug_finalize() {
  if (g_debug == nullptr) {
    return;
  }

  // Make sure that there are no other threads doing debug allocations
  // before we kill everything.
  ScopedConcurrentLock::BlockAllOperations();

  // Turn off capturing allocations calls.
  DebugDisableSet(true);

  if (g_debug->config().options() & FREE_TRACK) {
    PointerData::VerifyAllFreed();
  }

  if (g_debug->config().options() & LEAK_TRACK) {
    PointerData::LogLeaks();
  }

  if ((g_debug->config().options() & RECORD_ALLOCS) && g_debug->config().record_allocs_on_exit()) {
    RecordData::WriteEntriesOnExit();
  }

  if ((g_debug->config().options() & BACKTRACE) && g_debug->config().backtrace_dump_on_exit()) {
    debug_dump_heap(android::base::StringPrintf("%s.%d.exit.txt",
                                                g_debug->config().backtrace_dump_prefix().c_str(),
                                                getpid()).c_str());
  }

  if (g_debug->config().options() & LOG_ALLOCATOR_STATS_ON_EXIT) {
    LogAllocatorStats::Log();
  }

  backtrace_shutdown();

  // In order to prevent any issues of threads freeing previous pointers
  // after the main thread calls this code, simply leak the g_debug pointer
  // and do not destroy the debug disable pthread key.
}
469
debug_get_malloc_leak_info(uint8_t ** info,size_t * overall_size,size_t * info_size,size_t * total_memory,size_t * backtrace_size)470 void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
471 size_t* total_memory, size_t* backtrace_size) {
472 ScopedConcurrentLock lock;
473 ScopedDisableDebugCalls disable;
474 ScopedBacktraceSignalBlocker blocked;
475
476 // Verify the arguments.
477 if (info == nullptr || overall_size == nullptr || info_size == nullptr || total_memory == nullptr ||
478 backtrace_size == nullptr) {
479 error_log("get_malloc_leak_info: At least one invalid parameter.");
480 return;
481 }
482
483 *info = nullptr;
484 *overall_size = 0;
485 *info_size = 0;
486 *total_memory = 0;
487 *backtrace_size = 0;
488
489 if (!(g_debug->config().options() & BACKTRACE)) {
490 error_log(
491 "get_malloc_leak_info: Allocations not being tracked, to enable "
492 "set the option 'backtrace'.");
493 return;
494 }
495
496 PointerData::GetInfo(info, overall_size, info_size, total_memory, backtrace_size);
497 }
498
// Releases the buffer previously handed out by debug_get_malloc_leak_info.
void debug_free_malloc_leak_info(uint8_t* info) {
  g_dispatch->free(info);
  // Purge the memory that was freed since a significant amount of
  // memory could have been allocated and freed.
  g_dispatch->mallopt(M_PURGE_ALL, 0);
}
505
debug_malloc_usable_size(void * pointer)506 size_t debug_malloc_usable_size(void* pointer) {
507 Unreachable::CheckIfRequested(g_debug->config());
508
509 if (DebugCallsDisabled() || pointer == nullptr) {
510 return g_dispatch->malloc_usable_size(pointer);
511 }
512 ScopedConcurrentLock lock;
513 ScopedDisableDebugCalls disable;
514 ScopedBacktraceSignalBlocker blocked;
515
516 if (!VerifyPointer(pointer, "malloc_usable_size")) {
517 return 0;
518 }
519
520 return InternalMallocUsableSize(pointer);
521 }
522
// Core malloc implementation shared by debug_malloc and debug_realloc.
// Returns the allocated pointer (nullptr on failure, with errno set) plus
// the call's start/end timestamps for allocation recording.
static TimedResult InternalMalloc(size_t size) {
  uint64_t options = g_debug->config().options();
  // Periodic heap dump: ShouldDumpAndReset decides when one is due.
  if ((options & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
    debug_dump_heap(android::base::StringPrintf(
                        "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
                        .c_str());
  }
  if (options & LOG_ALLOCATOR_STATS_ON_SIGNAL) {
    LogAllocatorStats::CheckIfShouldLog();
  }

  // malloc(0) must still return a unique, freeable pointer.
  if (size == 0) {
    size = 1;
  }

  TimedResult result;

  // Reserve room for the debug header/guards on top of the user size.
  size_t real_size = size + g_debug->extra_bytes();
  if (real_size < size) {
    // Overflow.
    errno = ENOMEM;
    result.setValue<void*>(nullptr);
    return result;
  }

  if (size > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    result.setValue<void*>(nullptr);
    return result;
  }

  if (g_debug->HeaderEnabled()) {
    // The header must be properly aligned, so allocate via memalign.
    result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
    Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
    if (header == nullptr) {
      return result;
    }
    result.setValue<void*>(InitHeader(header, header, size));
  } else {
    result = TCALL(malloc, real_size);
  }

  void* pointer = result.getValue<void*>();

  if (pointer != nullptr) {
    if (g_debug->TrackPointers()) {
      PointerData::Add(pointer, size);
    }

    if (g_debug->config().options() & FILL_ON_ALLOC) {
      // Paint the new allocation (up to the configured limit) with the fill
      // pattern to help catch reads of uninitialized memory.
      size_t bytes = InternalMallocUsableSize(pointer);
      size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
      bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
      memset(pointer, g_debug->config().fill_alloc_value(), bytes);
    }
  }

  return result;
}
582
// Exported malloc replacement: delegates to InternalMalloc and records the
// operation (with timestamps) when RECORD_ALLOCS is enabled.
void* debug_malloc(size_t size) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->malloc(size);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  TimedResult result = InternalMalloc(size);

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(
        memory_trace::Entry{.tid = gettid(),
                            .type = memory_trace::MALLOC,
                            .ptr = reinterpret_cast<uint64_t>(result.getValue<void*>()),
                            .size = size,
                            .start_ns = result.GetStartTimeNS(),
                            .end_ns = result.GetEndTimeNS()});
  }

  return result.getValue<void*>();
}
607
// Core free implementation shared by debug_free and debug_realloc. Checks
// the guard regions, optionally fills the freed memory, and either frees
// the block immediately or (with FREE_TRACK) routes it through the freed-
// pointer quarantine. Returns the timestamps of the underlying free call.
static TimedResult InternalFree(void* pointer) {
  uint64_t options = g_debug->config().options();
  // Periodic heap dump check, mirroring the allocation path.
  if ((options & BACKTRACE) && g_debug->pointer->ShouldDumpAndReset()) {
    debug_dump_heap(android::base::StringPrintf(
                        "%s.%d.txt", g_debug->config().backtrace_dump_prefix().c_str(), getpid())
                        .c_str());
  }
  if (options & LOG_ALLOCATOR_STATS_ON_SIGNAL) {
    LogAllocatorStats::CheckIfShouldLog();
  }

  void* free_pointer = pointer;
  size_t bytes;
  Header* header;
  if (g_debug->HeaderEnabled()) {
    header = g_debug->GetHeader(pointer);
    // The real allocator must be given the original allocation pointer,
    // not the offset user-visible one.
    free_pointer = header->orig_pointer;

    if (g_debug->config().options() & FRONT_GUARD) {
      if (!g_debug->front_guard->Valid(header)) {
        g_debug->front_guard->LogFailure(header);
      }
    }
    if (g_debug->config().options() & REAR_GUARD) {
      if (!g_debug->rear_guard->Valid(header)) {
        g_debug->rear_guard->LogFailure(header);
      }
    }

    // Mark as freed so a later use of this pointer can be diagnosed.
    header->tag = DEBUG_FREE_TAG;

    bytes = header->usable_size;
  } else {
    bytes = g_dispatch->malloc_usable_size(pointer);
  }

  if (g_debug->config().options() & FILL_ON_FREE) {
    size_t fill_bytes = g_debug->config().fill_on_free_bytes();
    fill_bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
    memset(pointer, g_debug->config().fill_free_value(), fill_bytes);
  }

  if (g_debug->TrackPointers()) {
    PointerData::Remove(pointer);
  }

  TimedResult result;
  if (g_debug->config().options() & FREE_TRACK) {
    // Do not add the allocation until we are done modifying the pointer
    // itself. This avoids a race if a lot of threads are all doing
    // frees at the same time and we wind up trying to really free this
    // pointer from another thread, while still trying to free it in
    // this function.
    // NOTE(review): AddFreed appears to return a (possibly null) pointer
    // that should now really be released — semantics live in PointerData.
    pointer = PointerData::AddFreed(pointer, bytes);
    if (pointer != nullptr && g_debug->HeaderEnabled()) {
      pointer = g_debug->GetHeader(pointer)->orig_pointer;
    }
    result = TCALLVOID(free, pointer);
  } else {
    result = TCALLVOID(free, free_pointer);
  }

  return result;
}
672
// Exported free replacement: verifies the pointer before handing it to
// InternalFree, and records the operation when RECORD_ALLOCS is enabled.
void debug_free(void* pointer) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled() || pointer == nullptr) {
    return g_dispatch->free(pointer);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // An invalid pointer is logged (and may abort); do not free it.
  if (!VerifyPointer(pointer, "free")) {
    return;
  }

  TimedResult result = InternalFree(pointer);

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(memory_trace::Entry{.tid = gettid(),
                                                  .type = memory_trace::FREE,
                                                  .ptr = reinterpret_cast<uint64_t>(pointer),
                                                  .start_ns = result.GetStartTimeNS(),
                                                  .end_ns = result.GetEndTimeNS()});
  }
}
697
// Exported memalign replacement. When headers are enabled, the block is
// over-allocated with plain malloc and the user pointer is aligned manually
// (the underlying allocator's natural alignment is unknown); otherwise the
// request is forwarded to the real memalign with room for the extra bytes.
void* debug_memalign(size_t alignment, size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->memalign(alignment, bytes);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // A zero-byte request must still return a unique, freeable pointer.
  if (bytes == 0) {
    bytes = 1;
  }

  if (bytes > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    return nullptr;
  }

  TimedResult result;
  void* pointer;
  if (g_debug->HeaderEnabled()) {
    // Make the alignment a power of two.
    if (!powerof2(alignment)) {
      alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }
    // Force the alignment to at least MINIMUM_ALIGNMENT_BYTES to guarantee
    // that the header is aligned properly.
    if (alignment < MINIMUM_ALIGNMENT_BYTES) {
      alignment = MINIMUM_ALIGNMENT_BYTES;
    }

    // We don't have any idea what the natural alignment of
    // the underlying native allocator is, so we always need to
    // over allocate.
    size_t real_size = alignment + bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }

    result = TCALL(malloc, real_size);
    pointer = result.getValue<void*>();
    if (pointer == nullptr) {
      return nullptr;
    }

    uintptr_t value = reinterpret_cast<uintptr_t>(pointer) + g_debug->pointer_offset();
    // Now align the pointer.
    value += (-value % alignment);

    Header* header = g_debug->GetHeader(reinterpret_cast<void*>(value));
    // Don't need to update `result` here because we only need the timestamps.
    pointer = InitHeader(header, pointer, bytes);
  } else {
    size_t real_size = bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }
    result = TCALL(memalign, alignment, real_size);
    pointer = result.getValue<void*>();
  }

  if (pointer != nullptr) {
    if (g_debug->TrackPointers()) {
      PointerData::Add(pointer, bytes);
    }

    if (g_debug->config().options() & FILL_ON_ALLOC) {
      // Paint the fresh allocation (up to the configured limit) to expose
      // reads of uninitialized memory.
      size_t bytes = InternalMallocUsableSize(pointer);
      size_t fill_bytes = g_debug->config().fill_on_alloc_bytes();
      bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
      memset(pointer, g_debug->config().fill_alloc_value(), bytes);
    }

    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(memory_trace::Entry{.tid = gettid(),
                                                    .type = memory_trace::MEMALIGN,
                                                    .ptr = reinterpret_cast<uint64_t>(pointer),
                                                    .size = bytes,
                                                    .u.align = alignment,
                                                    .start_ns = result.GetStartTimeNS(),
                                                    .end_ns = result.GetEndTimeNS()});
    }
  }

  return pointer;
}
789
// Exported realloc replacement. Handles the C-standard special cases
// (nullptr -> malloc, size 0 -> free) and otherwise either adjusts the
// header bookkeeping in place (same size / shrink) or performs an
// allocate-copy-free sequence.
void* debug_realloc(void* pointer, size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->realloc(pointer, bytes);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // realloc(nullptr, bytes) behaves like malloc(bytes).
  if (pointer == nullptr) {
    TimedResult result = InternalMalloc(bytes);
    pointer = result.getValue<void*>();
    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(memory_trace::Entry{.tid = gettid(),
                                                    .type = memory_trace::REALLOC,
                                                    .ptr = reinterpret_cast<uint64_t>(pointer),
                                                    .size = bytes,
                                                    .u.old_ptr = 0,
                                                    .start_ns = result.GetStartTimeNS(),
                                                    .end_ns = result.GetEndTimeNS()});
    }
    return pointer;
  }

  if (!VerifyPointer(pointer, "realloc")) {
    return nullptr;
  }

  // realloc(pointer, 0) behaves like free(pointer).
  if (bytes == 0) {
    TimedResult result = InternalFree(pointer);

    if (g_debug->config().options() & RECORD_ALLOCS) {
      g_debug->record->AddEntry(
          memory_trace::Entry{.tid = gettid(),
                              .type = memory_trace::REALLOC,
                              .ptr = 0,
                              .size = 0,
                              .u.old_ptr = reinterpret_cast<uint64_t>(pointer),
                              .start_ns = result.GetStartTimeNS(),
                              .end_ns = result.GetEndTimeNS()});
    }

    return nullptr;
  }

  size_t real_size = bytes;
  if (g_debug->config().options() & EXPAND_ALLOC) {
    real_size += g_debug->config().expand_alloc_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }
  }

  if (bytes > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    return nullptr;
  }

  TimedResult result;
  void* new_pointer;
  size_t prev_size;
  if (g_debug->HeaderEnabled()) {
    // Same size, do nothing.
    Header* header = g_debug->GetHeader(pointer);
    if (real_size == header->size) {
      if (g_debug->TrackPointers()) {
        // Remove and re-add so that the backtrace is updated.
        PointerData::Remove(pointer);
        PointerData::Add(pointer, real_size);
      }
      return pointer;
    }

    // Allocation is shrinking.
    if (real_size < header->usable_size) {
      header->size = real_size;
      if (g_debug->config().options() & REAR_GUARD) {
        // Don't bother allocating a smaller pointer in this case, simply
        // change the header usable_size and reset the rear guard.
        header->usable_size = header->size;
        memset(g_debug->GetRearGuard(header), g_debug->config().rear_guard_value(),
               g_debug->config().rear_guard_bytes());
      }
      if (g_debug->TrackPointers()) {
        // Remove and re-add so that the backtrace is updated.
        PointerData::Remove(pointer);
        PointerData::Add(pointer, real_size);
      }
      return pointer;
    }

    // Allocate the new size.
    result = InternalMalloc(bytes);
    new_pointer = result.getValue<void*>();
    if (new_pointer == nullptr) {
      errno = ENOMEM;
      return nullptr;
    }

    prev_size = header->usable_size;
    memcpy(new_pointer, pointer, prev_size);
    TimedResult free_time = InternalFree(pointer);
    // `realloc` is split into two steps, update the end time to the finish time
    // of the second operation.
    result.SetEndTimeNS(free_time.GetEndTimeNS());
  } else {
    if (g_debug->TrackPointers()) {
      PointerData::Remove(pointer);
    }

    prev_size = g_dispatch->malloc_usable_size(pointer);
    result = TCALL(realloc, pointer, real_size);
    new_pointer = result.getValue<void*>();
    if (new_pointer == nullptr) {
      return nullptr;
    }

    if (g_debug->TrackPointers()) {
      PointerData::Add(new_pointer, real_size);
    }
  }

  // Only fill the portion of the allocation past the old contents.
  if (g_debug->config().options() & FILL_ON_ALLOC) {
    size_t bytes = InternalMallocUsableSize(new_pointer);
    if (bytes > g_debug->config().fill_on_alloc_bytes()) {
      bytes = g_debug->config().fill_on_alloc_bytes();
    }
    if (bytes > prev_size) {
      memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(new_pointer) + prev_size),
             g_debug->config().fill_alloc_value(), bytes - prev_size);
    }
  }

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(memory_trace::Entry{.tid = gettid(),
                                                  .type = memory_trace::REALLOC,
                                                  .ptr = reinterpret_cast<uint64_t>(new_pointer),
                                                  .size = bytes,
                                                  .u.old_ptr = reinterpret_cast<uint64_t>(pointer),
                                                  .start_ns = result.GetStartTimeNS(),
                                                  .end_ns = result.GetEndTimeNS()});
  }

  return new_pointer;
}
938
// Exported calloc replacement: guards against nmemb*bytes and extra-byte
// overflow, zero-fills the allocation, and tracks/records it per config.
void* debug_calloc(size_t nmemb, size_t bytes) {
  Unreachable::CheckIfRequested(g_debug->config());

  if (DebugCallsDisabled()) {
    return g_dispatch->calloc(nmemb, bytes);
  }
  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  size_t size;
  if (__builtin_mul_overflow(nmemb, bytes, &size)) {
    // Overflow
    errno = ENOMEM;
    return nullptr;
  }

  // A zero-byte request must still return a unique, freeable pointer.
  if (size == 0) {
    size = 1;
  }

  size_t real_size;
  if (__builtin_add_overflow(size, g_debug->extra_bytes(), &real_size)) {
    // Overflow.
    errno = ENOMEM;
    return nullptr;
  }

  if (real_size > PointerInfoType::MaxSize()) {
    errno = ENOMEM;
    return nullptr;
  }

  void* pointer;
  TimedResult result;
  if (g_debug->HeaderEnabled()) {
    // Need to guarantee the alignment of the header.
    result = TCALL(memalign, MINIMUM_ALIGNMENT_BYTES, real_size);
    Header* header = reinterpret_cast<Header*>(result.getValue<void*>());
    if (header == nullptr) {
      return nullptr;
    }
    // memalign does not zero memory, so satisfy the calloc contract manually.
    memset(header, 0, g_dispatch->malloc_usable_size(header));
    pointer = InitHeader(header, header, size);
  } else {
    result = TCALL(calloc, 1, real_size);
    pointer = result.getValue<void*>();
  }

  if (g_debug->config().options() & RECORD_ALLOCS) {
    g_debug->record->AddEntry(memory_trace::Entry{.tid = gettid(),
                                                  .type = memory_trace::CALLOC,
                                                  .ptr = reinterpret_cast<uint64_t>(pointer),
                                                  .size = bytes,
                                                  .u.n_elements = nmemb,
                                                  .start_ns = result.GetStartTimeNS(),
                                                  .end_ns = result.GetEndTimeNS()});
  }

  if (pointer != nullptr && g_debug->TrackPointers()) {
    PointerData::Add(pointer, size);
  }
  return pointer;
}
1003
// Pass-through to the underlying allocator; debug mode adds no bookkeeping
// of its own to mallinfo.
struct mallinfo debug_mallinfo() {
  return g_dispatch->mallinfo();
}
1007
// Pass-through to the underlying allocator's mallopt.
int debug_mallopt(int param, int value) {
  return g_dispatch->mallopt(param, value);
}
1011
// Writes an XML summary of live allocations to fp. When pointer tracking is
// off, defers to the underlying allocator's malloc_info.
int debug_malloc_info(int options, FILE* fp) {
  if (DebugCallsDisabled() || !g_debug->TrackPointers()) {
    return g_dispatch->malloc_info(options, fp);
  }

  // Make sure any pending output is written to the file.
  fflush(fp);

  ScopedConcurrentLock lock;
  ScopedDisableDebugCalls disable;
  ScopedBacktraceSignalBlocker blocked;

  // Avoid any issues where allocations are made that will be freed
  // in the fclose.
  int fd = fileno(fp);
  // Root element's destructor closes the tag after all allocations are
  // written.
  MallocXmlElem root(fd, "malloc", "version=\"debug-malloc-1\"");
  std::vector<ListInfoType> list;
  PointerData::GetAllocList(&list);

  size_t alloc_num = 0;
  for (size_t i = 0; i < list.size(); i++) {
    MallocXmlElem alloc(fd, "allocation", "nr=\"%zu\"", alloc_num);

    // Collapse a run of adjacent entries with the same size into a single
    // <allocation> with a total count. NOTE(review): assumes GetAllocList
    // returns entries ordered so equal sizes are adjacent — confirm.
    size_t total = 1;
    size_t size = list[i].size;
    while (i < list.size() - 1 && list[i + 1].size == size) {
      i++;
      total++;
    }
    MallocXmlElem(fd, "size").Contents("%zu", list[i].size);
    MallocXmlElem(fd, "total").Contents("%zu", total);
    alloc_num++;
  }
  return 0;
}
1047
debug_aligned_alloc(size_t alignment,size_t size)1048 void* debug_aligned_alloc(size_t alignment, size_t size) {
1049 Unreachable::CheckIfRequested(g_debug->config());
1050
1051 if (DebugCallsDisabled()) {
1052 return g_dispatch->aligned_alloc(alignment, size);
1053 }
1054 if (!powerof2(alignment) || (size % alignment) != 0) {
1055 errno = EINVAL;
1056 return nullptr;
1057 }
1058 return debug_memalign(alignment, size);
1059 }
1060
debug_posix_memalign(void ** memptr,size_t alignment,size_t size)1061 int debug_posix_memalign(void** memptr, size_t alignment, size_t size) {
1062 Unreachable::CheckIfRequested(g_debug->config());
1063
1064 if (DebugCallsDisabled()) {
1065 return g_dispatch->posix_memalign(memptr, alignment, size);
1066 }
1067
1068 if (alignment < sizeof(void*) || !powerof2(alignment)) {
1069 return EINVAL;
1070 }
1071 int saved_errno = errno;
1072 *memptr = debug_memalign(alignment, size);
1073 errno = saved_errno;
1074 return (*memptr != nullptr) ? 0 : ENOMEM;
1075 }
1076
debug_malloc_iterate(uintptr_t base,size_t size,void (* callback)(uintptr_t,size_t,void *),void * arg)1077 int debug_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*),
1078 void* arg) {
1079 ScopedConcurrentLock lock;
1080 if (g_debug->TrackPointers()) {
1081 PointerData::IteratePointers([&callback, &arg](uintptr_t pointer) {
1082 callback(pointer, InternalMallocUsableSize(reinterpret_cast<void*>(pointer)), arg);
1083 });
1084 return 0;
1085 }
1086
1087 // An option that adds a header will add pointer tracking, so no need to
1088 // check if headers are enabled.
1089 return g_dispatch->malloc_iterate(base, size, callback, arg);
1090 }
1091
// Quiesces the allocator (typically around fork): waits for in-flight debug
// operations via the concurrent lock, stops the underlying allocator, then
// lets the pointer-tracking data lock itself for fork.
void debug_malloc_disable() {
  ScopedConcurrentLock lock;
  g_dispatch->malloc_disable();
  if (g_debug->pointer) {
    g_debug->pointer->PrepareFork();
  }
}
1099
// Re-enables the allocator after debug_malloc_disable, in the reverse order:
// release the pointer-tracking fork state first, then the underlying
// allocator.
void debug_malloc_enable() {
  ScopedConcurrentLock lock;
  if (g_debug->pointer) {
    g_debug->pointer->PostForkParent();
  }
  g_dispatch->malloc_enable();
}
1107
debug_malloc_backtrace(void * pointer,uintptr_t * frames,size_t max_frames)1108 ssize_t debug_malloc_backtrace(void* pointer, uintptr_t* frames, size_t max_frames) {
1109 if (DebugCallsDisabled() || pointer == nullptr) {
1110 return 0;
1111 }
1112 ScopedConcurrentLock lock;
1113 ScopedDisableDebugCalls disable;
1114 ScopedBacktraceSignalBlocker blocked;
1115
1116 if (!(g_debug->config().options() & BACKTRACE)) {
1117 return 0;
1118 }
1119 pointer = UntagPointer(pointer);
1120 return PointerData::GetFrames(pointer, frames, max_frames);
1121 }
1122
1123 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
debug_pvalloc(size_t bytes)1124 void* debug_pvalloc(size_t bytes) {
1125 Unreachable::CheckIfRequested(g_debug->config());
1126
1127 if (DebugCallsDisabled()) {
1128 return g_dispatch->pvalloc(bytes);
1129 }
1130
1131 size_t pagesize = getpagesize();
1132 size_t size = __BIONIC_ALIGN(bytes, pagesize);
1133 if (size < bytes) {
1134 // Overflow
1135 errno = ENOMEM;
1136 return nullptr;
1137 }
1138 return debug_memalign(pagesize, size);
1139 }
1140
debug_valloc(size_t size)1141 void* debug_valloc(size_t size) {
1142 Unreachable::CheckIfRequested(g_debug->config());
1143
1144 if (DebugCallsDisabled()) {
1145 return g_dispatch->valloc(size);
1146 }
1147 return debug_memalign(getpagesize(), size);
1148 }
1149 #endif
1150
// Serializes heap-dump writers (debug_write_malloc_leak_info/debug_dump_heap).
static std::mutex g_dump_lock;
1152
write_dump(int fd)1153 static void write_dump(int fd) {
1154 dprintf(fd, "Android Native Heap Dump v1.2\n\n");
1155
1156 std::string fingerprint = android::base::GetProperty("ro.build.fingerprint", "unknown");
1157 dprintf(fd, "Build fingerprint: '%s'\n\n", fingerprint.c_str());
1158
1159 PointerData::DumpLiveToFile(fd);
1160
1161 dprintf(fd, "MAPS\n");
1162 std::string content;
1163 if (!android::base::ReadFileToString("/proc/self/maps", &content)) {
1164 dprintf(fd, "Could not open /proc/self/maps\n");
1165 } else {
1166 dprintf(fd, "%s", content.c_str());
1167 }
1168 dprintf(fd, "END\n");
1169
1170 // Purge the memory that was allocated and freed during this operation
1171 // since it can be large enough to expand the RSS significantly.
1172 g_dispatch->mallopt(M_PURGE_ALL, 0);
1173 }
1174
debug_write_malloc_leak_info(FILE * fp)1175 bool debug_write_malloc_leak_info(FILE* fp) {
1176 // Make sure any pending output is written to the file.
1177 fflush(fp);
1178
1179 ScopedConcurrentLock lock;
1180 ScopedDisableDebugCalls disable;
1181 ScopedBacktraceSignalBlocker blocked;
1182
1183 std::lock_guard<std::mutex> guard(g_dump_lock);
1184
1185 if (!(g_debug->config().options() & BACKTRACE)) {
1186 return false;
1187 }
1188
1189 write_dump(fileno(fp));
1190
1191 return true;
1192 }
1193
debug_dump_heap(const char * file_name)1194 void debug_dump_heap(const char* file_name) {
1195 ScopedConcurrentLock lock;
1196 ScopedDisableDebugCalls disable;
1197 ScopedBacktraceSignalBlocker blocked;
1198
1199 std::lock_guard<std::mutex> guard(g_dump_lock);
1200
1201 int fd = open(file_name, O_RDWR | O_CREAT | O_NOFOLLOW | O_TRUNC | O_CLOEXEC, 0644);
1202 if (fd == -1) {
1203 error_log("Unable to create file: %s", file_name);
1204 return;
1205 }
1206
1207 error_log("Dumping to file: %s\n", file_name);
1208 write_dump(fd);
1209 close(fd);
1210 }
1211