// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/shim/allocator_shim.h"

#include <atomic>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <memory>
#include <new>
#include <sstream>
#include <vector>

#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/memory/page_size.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include <malloc.h>
#elif BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>

#include "partition_alloc/shim/allocator_interception_apple.h"
#include "partition_alloc/third_party/apple_apsl/malloc.h"
#else
#include <malloc.h>
#endif

#if !BUILDFLAG(IS_WIN)
#include <unistd.h>
#endif

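// glibc routes some of its internal allocations through __libc_memalign();
// declare it so the tests can call it directly and check that the shim covers
// this entry point too (see allocator_shim_override_glibc_weak_symbols.h).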
#if defined(LIBC_GLIBC)
extern "C" void* __libc_memalign(size_t align, size_t s);
#endif

namespace allocator_shim {
namespace {

using testing::_;
using testing::MockFunction;

// Special sentinel values used for testing GetSizeEstimate() interception.
const char kTestSizeEstimateData[] = "test_value";
constexpr void* kTestSizeEstimateAddress = (void*)kTestSizeEstimateData;
constexpr size_t kTestSizeEstimate = 1234;

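// Test fixture whose Mock* hooks back the AllocatorDispatch defined below.
// Each hook bumps a per-size or per-hashed-address counter, then forwards to
// self->next, so real allocations still succeed.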
class AllocatorShimTest : public testing::Test {
 public:
  AllocatorShimTest() : testing::Test() {}

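  // Buckets an address into [0, MaxSizeTracked()) so that per-address counters
  // fit in fixed-size vectors. Collisions are possible, but harmless here:
  // the tests only assert that a bucket's count is >= 1.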
  static size_t Hash(const void* ptr) {
    return reinterpret_cast<uintptr_t>(ptr) % MaxSizeTracked();
  }

  static void* MockAlloc(const AllocatorDispatch* self,
                         size_t size,
                         void* context) {
    if (instance_ && size < MaxSizeTracked()) {
      ++(instance_->allocs_intercepted_by_size[size]);
    }
    return self->next->alloc_function(self->next, size, context);
  }

  static void* MockAllocUnchecked(const AllocatorDispatch* self,
                                  size_t size,
                                  void* context) {
    if (instance_ && size < MaxSizeTracked()) {
      ++(instance_->allocs_intercepted_by_size[size]);
    }
    return self->next->alloc_unchecked_function(self->next, size, context);
  }

  static void* MockAllocZeroInit(const AllocatorDispatch* self,
                                 size_t n,
                                 size_t size,
                                 void* context) {
    const size_t real_size = n * size;
    if (instance_ && real_size < MaxSizeTracked()) {
      ++(instance_->zero_allocs_intercepted_by_size[real_size]);
    }
    return self->next->alloc_zero_initialized_function(self->next, n, size,
                                                       context);
  }

  static void* MockAllocAligned(const AllocatorDispatch* self,
                                size_t alignment,
                                size_t size,
                                void* context) {
    if (instance_) {
      if (size < MaxSizeTracked()) {
        ++(instance_->aligned_allocs_intercepted_by_size[size]);
      }
      if (alignment < MaxSizeTracked()) {
        ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
      }
    }
    return self->next->alloc_aligned_function(self->next, alignment, size,
                                              context);
  }

  static void* MockRealloc(const AllocatorDispatch* self,
                           void* address,
                           size_t size,
                           void* context) {
    if (instance_) {
      // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test.
      // The first time each thread hits it, the realloc fails, which triggers
      // the invocation of std::new_handler.
      if (size == 0xFEED) {
        thread_local bool did_fail_realloc_0xfeed_once = false;
        if (!did_fail_realloc_0xfeed_once) {
          did_fail_realloc_0xfeed_once = true;
          return nullptr;
        }
        return address;
      }

      if (size < MaxSizeTracked()) {
        ++(instance_->reallocs_intercepted_by_size[size]);
      }
      ++instance_->reallocs_intercepted_by_addr[Hash(address)];
    }
    return self->next->realloc_function(self->next, address, size, context);
  }

  static void MockFree(const AllocatorDispatch* self,
                       void* address,
                       void* context) {
    if (instance_) {
      ++instance_->frees_intercepted_by_addr[Hash(address)];
    }
    self->next->free_function(self->next, address, context);
  }

  static size_t MockGetSizeEstimate(const AllocatorDispatch* self,
                                    void* address,
                                    void* context) {
    // Special testing values for GetSizeEstimate() interception.
    if (address == kTestSizeEstimateAddress) {
      return kTestSizeEstimate;
    }
    return self->next->get_size_estimate_function(self->next, address, context);
  }

  static bool MockClaimedAddress(const AllocatorDispatch* self,
                                 void* address,
                                 void* context) {
    // The same as MockGetSizeEstimate.
    if (address == kTestSizeEstimateAddress) {
      return true;
    }
    return self->next->claimed_address_function(self->next, address, context);
  }

  static size_t MockGoodSize(const AllocatorDispatch* self,
                             size_t size,
                             void* context) {
    return size;
  }

  static unsigned MockBatchMalloc(const AllocatorDispatch* self,
                                  size_t size,
                                  void** results,
                                  unsigned num_requested,
                                  void* context) {
    if (instance_) {
      instance_->batch_mallocs_intercepted_by_size[size] += num_requested;
    }
    return self->next->batch_malloc_function(self->next, size, results,
                                             num_requested, context);
  }

  static void MockBatchFree(const AllocatorDispatch* self,
                            void** to_be_freed,
                            unsigned num_to_be_freed,
                            void* context) {
    if (instance_) {
      for (unsigned i = 0; i < num_to_be_freed; ++i) {
        ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])];
      }
    }
    self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
                                    context);
  }

  static void MockFreeDefiniteSize(const AllocatorDispatch* self,
                                   void* ptr,
                                   size_t size,
                                   void* context) {
    if (instance_) {
      ++instance_->frees_intercepted_by_addr[Hash(ptr)];
      ++instance_->free_definite_sizes_intercepted_by_size[size];
    }
    self->next->free_definite_size_function(self->next, ptr, size, context);
  }

  static void MockTryFreeDefault(const AllocatorDispatch* self,
                                 void* ptr,
                                 void* context) {
    if (instance_) {
      ++instance_->frees_intercepted_by_addr[Hash(ptr)];
    }
    self->next->try_free_default_function(self->next, ptr, context);
  }

  static void* MockAlignedMalloc(const AllocatorDispatch* self,
                                 size_t size,
                                 size_t alignment,
                                 void* context) {
    if (instance_ && size < MaxSizeTracked()) {
      ++instance_->aligned_mallocs_intercepted_by_size[size];
    }
    return self->next->aligned_malloc_function(self->next, size, alignment,
                                               context);
  }

  static void* MockAlignedRealloc(const AllocatorDispatch* self,
                                  void* address,
                                  size_t size,
                                  size_t alignment,
                                  void* context) {
    if (instance_) {
      if (size < MaxSizeTracked()) {
        ++instance_->aligned_reallocs_intercepted_by_size[size];
      }
      ++instance_->aligned_reallocs_intercepted_by_addr[Hash(address)];
    }
    return self->next->aligned_realloc_function(self->next, address, size,
                                                alignment, context);
  }

  static void MockAlignedFree(const AllocatorDispatch* self,
                              void* address,
                              void* context) {
    if (instance_) {
      ++instance_->aligned_frees_intercepted_by_addr[Hash(address)];
    }
    self->next->aligned_free_function(self->next, address, context);
  }

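  // Registered via std::set_new_handler() in the NewHandlerConcurrency test;
  // counts invocations atomically since it runs on many threads at once.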
  static void NewHandler() {
    if (!instance_) {
      return;
    }
    instance_->num_new_handler_calls.fetch_add(1, std::memory_order_relaxed);
  }

  int32_t GetNumberOfNewHandlerCalls() {
    return instance_->num_new_handler_calls.load(std::memory_order_acquire);
  }

  void SetUp() override {
    allocs_intercepted_by_size.resize(MaxSizeTracked());
    zero_allocs_intercepted_by_size.resize(MaxSizeTracked());
    aligned_allocs_intercepted_by_size.resize(MaxSizeTracked());
    aligned_allocs_intercepted_by_alignment.resize(MaxSizeTracked());
    reallocs_intercepted_by_size.resize(MaxSizeTracked());
    reallocs_intercepted_by_addr.resize(MaxSizeTracked());
    frees_intercepted_by_addr.resize(MaxSizeTracked());
    batch_mallocs_intercepted_by_size.resize(MaxSizeTracked());
    batch_frees_intercepted_by_addr.resize(MaxSizeTracked());
    free_definite_sizes_intercepted_by_size.resize(MaxSizeTracked());
    aligned_mallocs_intercepted_by_size.resize(MaxSizeTracked());
    aligned_reallocs_intercepted_by_size.resize(MaxSizeTracked());
    aligned_reallocs_intercepted_by_addr.resize(MaxSizeTracked());
    aligned_frees_intercepted_by_addr.resize(MaxSizeTracked());
    num_new_handler_calls.store(0, std::memory_order_release);
    instance_ = this;

#if BUILDFLAG(IS_APPLE)
    InitializeAllocatorShim();
#endif
  }

  void TearDown() override {
    instance_ = nullptr;
#if BUILDFLAG(IS_APPLE)
    UninterceptMallocZonesForTesting();
#endif
  }

  static size_t MaxSizeTracked() {
#if BUILDFLAG(IS_IOS)
    // TODO(crbug.com/1077271): 64-bit iOS uses a page size that is larger than
    // SystemPageSize(), causing this test to make larger allocations, relative
    // to SystemPageSize().
    return 6 * partition_alloc::internal::SystemPageSize();
#else
    return 2 * partition_alloc::internal::SystemPageSize();
#endif
  }

 protected:
  std::vector<size_t> allocs_intercepted_by_size;
  std::vector<size_t> zero_allocs_intercepted_by_size;
  std::vector<size_t> aligned_allocs_intercepted_by_size;
  std::vector<size_t> aligned_allocs_intercepted_by_alignment;
  std::vector<size_t> reallocs_intercepted_by_size;
  std::vector<size_t> reallocs_intercepted_by_addr;
  std::vector<size_t> frees_intercepted_by_addr;
  std::vector<size_t> batch_mallocs_intercepted_by_size;
  std::vector<size_t> batch_frees_intercepted_by_addr;
  std::vector<size_t> free_definite_sizes_intercepted_by_size;
  std::vector<size_t> aligned_mallocs_intercepted_by_size;
  std::vector<size_t> aligned_reallocs_intercepted_by_size;
  std::vector<size_t> aligned_reallocs_intercepted_by_addr;
  std::vector<size_t> aligned_frees_intercepted_by_addr;
  std::atomic<uint32_t> num_new_handler_calls;

 private:
  static AllocatorShimTest* instance_;
};

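// Small helper types whose distinct sizes let InterceptCppSymbols tell the
// operator new interceptions apart via allocs_intercepted_by_size.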
struct TestStruct1 {
  uint32_t ignored;
  uint8_t ignored_2;
};

struct TestStruct2 {
  uint64_t ignored;
  uint8_t ignored_3;
};

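// Thread body for the NewHandlerConcurrency test: waits for the shared event,
// then performs the sentinel realloc(ptr, 0xFEED) that MockRealloc fails
// exactly once per thread, triggering the new_handler.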
class ThreadDelegateForNewHandlerTest : public base::PlatformThread::Delegate {
 public:
  explicit ThreadDelegateForNewHandlerTest(base::WaitableEvent* event)
      : event_(event) {}

  void ThreadMain() override {
    event_->Wait();
    void* temp = malloc(1);
    void* res = realloc(temp, 0xFEED);
    EXPECT_EQ(temp, res);
  }

 private:
  base::WaitableEvent* event_;
};

AllocatorShimTest* AllocatorShimTest::instance_ = nullptr;

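// The entries below must stay in AllocatorDispatch field order. `next` starts
// out nullptr and is wired up by InsertAllocatorDispatch(). Typical usage in
// the tests below:
//   InsertAllocatorDispatch(&g_mock_dispatch);
//   ... exercise malloc/free/new/delete ...
//   RemoveAllocatorDispatchForTesting(&g_mock_dispatch);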
AllocatorDispatch g_mock_dispatch = {
    &AllocatorShimTest::MockAlloc,          /* alloc_function */
    &AllocatorShimTest::MockAllocUnchecked, /* alloc_unchecked_function */
    &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */
    &AllocatorShimTest::MockAllocAligned,  /* alloc_aligned_function */
    &AllocatorShimTest::MockRealloc,       /* realloc_function */
    &AllocatorShimTest::MockFree,          /* free_function */
    &AllocatorShimTest::MockGetSizeEstimate,  /* get_size_estimate_function */
    &AllocatorShimTest::MockGoodSize,         /* good_size */
    &AllocatorShimTest::MockClaimedAddress,   /* claimed_address_function */
    &AllocatorShimTest::MockBatchMalloc,      /* batch_malloc_function */
    &AllocatorShimTest::MockBatchFree,        /* batch_free_function */
    &AllocatorShimTest::MockFreeDefiniteSize, /* free_definite_size_function */
    &AllocatorShimTest::MockTryFreeDefault,   /* try_free_default_function */
    &AllocatorShimTest::MockAlignedMalloc,    /* aligned_malloc_function */
    &AllocatorShimTest::MockAlignedRealloc,   /* aligned_realloc_function */
    &AllocatorShimTest::MockAlignedFree,      /* aligned_free_function */
    nullptr,                                  /* next */
};

TEST_F(AllocatorShimTest, InterceptLibcSymbols) {
  InsertAllocatorDispatch(&g_mock_dispatch);

  void* alloc_ptr = malloc(19);
  ASSERT_NE(nullptr, alloc_ptr);
  ASSERT_GE(allocs_intercepted_by_size[19], 1u);

  void* zero_alloc_ptr = calloc(2, 23);
  ASSERT_NE(nullptr, zero_alloc_ptr);
  ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u);

#if !BUILDFLAG(IS_WIN)
  void* posix_memalign_ptr = nullptr;
  int res = posix_memalign(&posix_memalign_ptr, 256, 59);
  ASSERT_EQ(0, res);
  ASSERT_NE(nullptr, posix_memalign_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u);
  ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u);

  // valloc() and pvalloc() are not defined on Android: pvalloc() is a GNU
  // extension, and valloc() is not in POSIX.
#if !BUILDFLAG(IS_ANDROID)
  const size_t kPageSize = partition_alloc::internal::base::GetPageSize();
  void* valloc_ptr = valloc(61);
  ASSERT_NE(nullptr, valloc_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
  ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u);
#endif  // !BUILDFLAG(IS_ANDROID)

#endif  // !BUILDFLAG(IS_WIN)

#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
  void* memalign_ptr = memalign(128, 53);
  ASSERT_NE(nullptr, memalign_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
  ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);

#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
  void* pvalloc_ptr = pvalloc(67);
  ASSERT_NE(nullptr, pvalloc_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
  // pvalloc rounds the size up to the next page.
  ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u);
#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)

#endif  // !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)

// See allocator_shim_override_glibc_weak_symbols.h for why we intercept
// internal libc symbols.
#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  void* libc_memalign_ptr = __libc_memalign(512, 56);
  ASSERT_NE(nullptr, libc_memalign_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(libc_memalign_ptr) % 512);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[512], 1u);
  ASSERT_GE(aligned_allocs_intercepted_by_size[56], 1u);
#endif

  char* realloc_ptr = static_cast<char*>(malloc(10));
  strcpy(realloc_ptr, "foobar");
  void* old_realloc_ptr = realloc_ptr;
  realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73));
  ASSERT_GE(reallocs_intercepted_by_size[73], 1u);
  ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u);
  ASSERT_EQ(0, strcmp(realloc_ptr, "foobar"));

  free(alloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u);

  free(zero_alloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u);

#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)
  free(memalign_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);

#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
  free(pvalloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)

#endif  // !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_APPLE)

#if !BUILDFLAG(IS_WIN)
  free(posix_memalign_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u);

#if !BUILDFLAG(IS_ANDROID)
  free(valloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u);
#endif  // !BUILDFLAG(IS_ANDROID)

#endif  // !BUILDFLAG(IS_WIN)

#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  free(libc_memalign_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(libc_memalign_ptr)], 1u);
#endif

  free(realloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u);

  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);

  void* non_hooked_ptr = malloc(4095);
  ASSERT_NE(nullptr, non_hooked_ptr);
  ASSERT_EQ(0u, allocs_intercepted_by_size[4095]);
  free(non_hooked_ptr);
}

// PartitionAlloc-Everywhere does not support batch_malloc / batch_free.
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
TEST_F(AllocatorShimTest, InterceptLibcSymbolsBatchMallocFree) {
  InsertAllocatorDispatch(&g_mock_dispatch);

  unsigned count = 13;
  std::vector<void*> results;
  results.resize(count);
  unsigned result_count = malloc_zone_batch_malloc(malloc_default_zone(), 99,
                                                   results.data(), count);
  ASSERT_EQ(count, result_count);

  // TODO(erikchen): On macOS 10.12+, batch_malloc in the default zone may
  // forward to another zone, which we've also shimmed, resulting in
  // MockBatchMalloc getting called twice as often as we'd expect. This
  // re-entrancy into the allocator shim is a bug that needs to be fixed.
  // https://crbug.com/693237.
  // ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);

  std::vector<void*> results_copy(results);
  malloc_zone_batch_free(malloc_default_zone(), results.data(), count);
  for (void* result : results_copy) {
    ASSERT_GE(batch_frees_intercepted_by_addr[Hash(result)], 1u);
  }
  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
}

TEST_F(AllocatorShimTest, InterceptLibcSymbolsFreeDefiniteSize) {
  InsertAllocatorDispatch(&g_mock_dispatch);

  void* alloc_ptr = malloc(19);
  ASSERT_NE(nullptr, alloc_ptr);
  ASSERT_GE(allocs_intercepted_by_size[19], 1u);

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  default_zone->free_definite_size(malloc_default_zone(), alloc_ptr, 19);
  ASSERT_GE(free_definite_sizes_intercepted_by_size[19], 1u);
  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
}
#endif  // BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if BUILDFLAG(IS_WIN)
TEST_F(AllocatorShimTest, InterceptUcrtAlignedAllocationSymbols) {
  InsertAllocatorDispatch(&g_mock_dispatch);

  constexpr size_t kAlignment = 32;
  void* alloc_ptr = _aligned_malloc(123, kAlignment);
  EXPECT_GE(aligned_mallocs_intercepted_by_size[123], 1u);

  void* new_alloc_ptr = _aligned_realloc(alloc_ptr, 1234, kAlignment);
  EXPECT_GE(aligned_reallocs_intercepted_by_size[1234], 1u);
  EXPECT_GE(aligned_reallocs_intercepted_by_addr[Hash(alloc_ptr)], 1u);

  _aligned_free(new_alloc_ptr);
  EXPECT_GE(aligned_frees_intercepted_by_addr[Hash(new_alloc_ptr)], 1u);

  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
}

TEST_F(AllocatorShimTest, AlignedReallocSizeZeroFrees) {
  void* alloc_ptr = _aligned_malloc(123, 16);
  ASSERT_TRUE(alloc_ptr);
  alloc_ptr = _aligned_realloc(alloc_ptr, 0, 16);
  ASSERT_FALSE(alloc_ptr);
}
#endif  // BUILDFLAG(IS_WIN)

TEST_F(AllocatorShimTest, InterceptCppSymbols) {
  InsertAllocatorDispatch(&g_mock_dispatch);

  TestStruct1* new_ptr = new TestStruct1;
  ASSERT_NE(nullptr, new_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u);

  TestStruct1* new_array_ptr = new TestStruct1[3];
  ASSERT_NE(nullptr, new_array_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u);

  TestStruct2* new_nt_ptr = new (std::nothrow) TestStruct2;
  ASSERT_NE(nullptr, new_nt_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2)], 1u);

  TestStruct2* new_array_nt_ptr = new TestStruct2[3];
  ASSERT_NE(nullptr, new_array_nt_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2) * 3], 1u);

  delete new_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_ptr)], 1u);

  delete[] new_array_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_ptr)], 1u);

  delete new_nt_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_nt_ptr)], 1u);

  delete[] new_array_nt_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u);

  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
}

// PartitionAlloc disallows large allocations to avoid integer-overflow
// errors.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
struct TooLarge {
  char padding1[1UL << 31];
  int padding2;
};

TEST_F(AllocatorShimTest, NewNoThrowTooLarge) {
  char* too_large_array = new (std::nothrow) char[(1UL << 31) + 100];
  EXPECT_EQ(nullptr, too_large_array);

  TooLarge* too_large_struct = new (std::nothrow) TooLarge;
  EXPECT_EQ(nullptr, too_large_struct);
}
#endif

// This test exercises the case of concurrent OOM failure, which would end up
// invoking std::new_handler concurrently. This is to cover the
// CallNewHandler() paths of allocator_shim.cc and smoke-test its thread
// safety. The test creates kNumThreads threads. Each of them mallocs some
// memory, and then does a realloc(<new memory>, 0xFEED).
// The shim intercepts such a realloc and makes it fail only once on each
// thread. We expect to see exactly kNumThreads invocations of the new_handler.
TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
  const int kNumThreads = 32;
  base::PlatformThreadHandle threads[kNumThreads];

  // The WaitableEvent here is used to attempt to trigger all the threads at
  // the same time, after they have been initialized.
  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);

  ThreadDelegateForNewHandlerTest mock_thread_main(&event);

  for (auto& thread : threads) {
    base::PlatformThread::Create(0, &mock_thread_main, &thread);
  }

  std::set_new_handler(&AllocatorShimTest::NewHandler);
  SetCallNewHandlerOnMallocFailure(true);  // It's going to fail on realloc().
  InsertAllocatorDispatch(&g_mock_dispatch);
  event.Signal();
  for (auto& thread : threads) {
    base::PlatformThread::Join(thread);
  }
  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
  ASSERT_EQ(kNumThreads, GetNumberOfNewHandlerCalls());
}

#if BUILDFLAG(IS_WIN)
TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
  ASSERT_EQ(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
}
#endif  // BUILDFLAG(IS_WIN)

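// GetUsableSize() abstracts over the per-platform malloc size query (_msize on
// Windows, malloc_size on Apple, malloc_usable_size elsewhere) so the
// size-estimate tests below run wherever such a query exists.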
#if BUILDFLAG(IS_WIN)
static size_t GetUsableSize(void* ptr) {
  return _msize(ptr);
}
#elif BUILDFLAG(IS_APPLE)
static size_t GetUsableSize(void* ptr) {
  return malloc_size(ptr);
}
#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
static size_t GetUsableSize(void* ptr) {
  return malloc_usable_size(ptr);
}
#else
#define NO_MALLOC_SIZE
#endif

#if !defined(NO_MALLOC_SIZE)
TEST_F(AllocatorShimTest, ShimReplacesMallocSizeWhenEnabled) {
  InsertAllocatorDispatch(&g_mock_dispatch);
  EXPECT_EQ(GetUsableSize(kTestSizeEstimateAddress), kTestSizeEstimate);
  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
}

TEST_F(AllocatorShimTest, ShimDoesntChangeMallocSizeWhenEnabled) {
  void* alloc = malloc(16);
  size_t sz = GetUsableSize(alloc);
  EXPECT_GE(sz, 16U);

  InsertAllocatorDispatch(&g_mock_dispatch);
  EXPECT_EQ(GetUsableSize(alloc), sz);
  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);

  free(alloc);
}
#endif  // !defined(NO_MALLOC_SIZE)

#if BUILDFLAG(IS_ANDROID)
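// Verifies that C library functions which allocate internally with malloc
// (realpath, strdup, strndup, getcwd, and vasprintf via iostreams) are routed
// through the shim, by checking that the interception counters advance.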
TEST_F(AllocatorShimTest, InterceptCLibraryFunctions) {
  auto total_counts = [](const std::vector<size_t>& counts) {
    size_t total = 0;
    for (const auto count : counts) {
      total += count;
    }
    return total;
  };
  size_t counts_before;
  size_t counts_after = total_counts(allocs_intercepted_by_size);
  void* ptr;

  InsertAllocatorDispatch(&g_mock_dispatch);

  // <cstdlib>
  counts_before = counts_after;
  ptr = realpath(".", nullptr);
  EXPECT_NE(nullptr, ptr);
  free(ptr);
  counts_after = total_counts(allocs_intercepted_by_size);
  EXPECT_GT(counts_after, counts_before);

  // <cstring>
  counts_before = counts_after;
  ptr = strdup("hello, world");
  EXPECT_NE(nullptr, ptr);
  free(ptr);
  counts_after = total_counts(allocs_intercepted_by_size);
  EXPECT_GT(counts_after, counts_before);

  counts_before = counts_after;
  ptr = strndup("hello, world", 5);
  EXPECT_NE(nullptr, ptr);
  free(ptr);
  counts_after = total_counts(allocs_intercepted_by_size);
  EXPECT_GT(counts_after, counts_before);

  // <unistd.h>
  counts_before = counts_after;
  ptr = getcwd(nullptr, 0);
  EXPECT_NE(nullptr, ptr);
  free(ptr);
  counts_after = total_counts(allocs_intercepted_by_size);
  EXPECT_GT(counts_after, counts_before);

  // With component builds on Android, we cannot intercept calls to functions
  // inside another component, in this instance the call to vasprintf() inside
  // libc++. This is not necessarily an issue for allocator shims, as long as
  // we accept that allocations and deallocations will not always be matched.
  // It is, however, essential for PartitionAlloc, which is exercised in the
  // test below.
#ifndef COMPONENT_BUILD
  // Calls vasprintf() indirectly, see below.
  counts_before = counts_after;
  std::stringstream stream;
  stream << std::setprecision(1) << std::showpoint << std::fixed << 1.e38;
  EXPECT_GT(stream.str().size(), 30u);
  counts_after = total_counts(allocs_intercepted_by_size);
  EXPECT_GT(counts_after, counts_before);
#endif  // !defined(COMPONENT_BUILD)

  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
}

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Non-regression test for crbug.com/1166558.
TEST_F(AllocatorShimTest, InterceptVasprintf) {
  // Printing a float which expands to >=30 characters calls vasprintf() in
  // libc, which we should intercept.
  std::stringstream stream;
  stream << std::setprecision(1) << std::showpoint << std::fixed << 1.e38;
  EXPECT_GT(stream.str().size(), 30u);
  // Should not crash.
}

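// A result this long should exceed any fixed-size first-try buffer used by
// the vasprintf() interception, exercising its heap-allocating retry path.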
TEST_F(AllocatorShimTest, InterceptLongVasprintf) {
  char* str = nullptr;
  const char* lorem_ipsum =
      "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed non risus. "
      "Suspendisse lectus tortor, dignissim sit amet, adipiscing nec, "
      "ultricies sed, dolor. Cras elementum ultrices diam. Maecenas ligula "
      "massa, varius a, semper congue, euismod non, mi. Proin porttitor, orci "
      "nec nonummy molestie, enim est eleifend mi, non fermentum diam nisl sit "
      "amet erat. Duis semper. Duis arcu massa, scelerisque vitae, consequat "
      "in, pretium a, enim. Pellentesque congue. Ut in risus volutpat libero "
      "pharetra tempor. Cras vestibulum bibendum augue. Praesent egestas leo "
      "in pede. Praesent blandit odio eu enim. Pellentesque sed dui ut augue "
      "blandit sodales. Vestibulum ante ipsum primis in faucibus orci luctus "
      "et ultrices posuere cubilia Curae; Aliquam nibh. Mauris ac mauris sed "
      "pede pellentesque fermentum. Maecenas adipiscing ante non diam sodales "
      "hendrerit.";
  int err = asprintf(&str, "%s", lorem_ipsum);
  EXPECT_EQ(err, static_cast<int>(strlen(lorem_ipsum)));
  EXPECT_TRUE(str);
  free(str);
}

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#endif  // BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)

// Non-regression test for crbug.com/1291885.
TEST_F(AllocatorShimTest, BatchMalloc) {
  constexpr unsigned kNumToAllocate = 20;
  void* pointers[kNumToAllocate];

  EXPECT_EQ(kNumToAllocate, malloc_zone_batch_malloc(malloc_default_zone(), 10,
                                                     pointers, kNumToAllocate));
  malloc_zone_batch_free(malloc_default_zone(), pointers, kNumToAllocate);
  // Should not crash.
}

TEST_F(AllocatorShimTest, MallocGoodSize) {
  constexpr size_t kTestSize = 100;
  size_t good_size = malloc_good_size(kTestSize);
  EXPECT_GE(good_size, kTestSize);
}

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)

}  // namespace
}  // namespace allocator_shim