// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif

#ifndef PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_
#define PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#error This header must be included iff PartitionAlloc-Everywhere is enabled.
#endif

#include <atomic>
#include <cstring>
#include <tuple>

#include "partition_alloc/partition_alloc_base/apple/mach_logging.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/early_zone_registration_constants.h"

namespace allocator_shim {

namespace {

// malloc_introspection_t's callback functions for our own zone
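//
// These introspection callbacks are what libmalloc and Apple's heap tools
// (e.g. `leaks`, `heap`, `malloc_history`) use to inspect or coordinate with
// a registered zone. Most of them are deliberately stubbed out below;
// good_size and the fork-coordination hooks are the main exceptions.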

kern_return_t MallocIntrospectionEnumerator(task_t task,
                                            void*,
                                            unsigned type_mask,
                                            vm_address_t zone_address,
                                            memory_reader_t reader,
                                            vm_range_recorder_t recorder) {
  // Should enumerate all memory regions allocated by this allocator, but this
  // is not implemented because there is no use case for now.
  return KERN_FAILURE;
}

size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
  return ShimGoodSize(size, nullptr);
}

boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
  // Should check the consistency of the allocator implementing this malloc
  // zone, but this is not implemented because there is no use case for now.
  return true;
}

void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
  // Should print the current state of the zone for debugging / investigation
  // purposes, but this is not implemented because there is no use case for
  // now.
}

void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
  // Should enable logging of the activities on the given `address`, but this
  // is not implemented because there is no use case for now.
}

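// fork(2) coordination: libmalloc's atfork handlers call force_lock on every
// registered zone before forking, force_unlock in the parent afterwards, and
// reinit_lock (zone version >= 9) in the child. The two hooks below and
// MallocIntrospectionReinitLock further down forward to PartitionAlloc so
// that no allocator lock is left held across fork().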
void MallocIntrospectionForceLock(malloc_zone_t* zone) {
  // Called before fork(2) to acquire the lock.
  partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
}

void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
  // Called in the parent process after fork(2) to release the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
}

void MallocIntrospectionStatistics(malloc_zone_t* zone,
                                   malloc_statistics_t* stats) {
  // Should report the memory usage correctly, but this is not implemented
  // because there is no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
  // Should return true if the underlying PartitionRoot is locked, but this is
  // not implemented because this function does not seem to be used in
  // practice.
  return false;
}

boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
  return false;
}

void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
}

void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
  // 'discharge' is not supported.
}

void MallocIntrospectionEnumerateDischargedPointers(
    malloc_zone_t* zone,
    void (^report_discharged)(void* memory, void* info)) {
  // 'discharge' is not supported.
}

void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
  // Called in the child process after fork(2) to re-initialize the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
}

void MallocIntrospectionPrintTask(task_t task,
                                  unsigned level,
                                  vm_address_t zone_address,
                                  memory_reader_t reader,
                                  print_task_printer_t printer) {
  // Should print the current state of another process's zone for debugging /
  // investigation purposes, but this is not implemented because there is no
  // use case for now.
}

void MallocIntrospectionTaskStatistics(task_t task,
                                       vm_address_t zone_address,
                                       memory_reader_t reader,
                                       malloc_statistics_t* stats) {
  // Should report the memory usage in another process's zone, but this is not
  // implemented because there is no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

// malloc_zone_t's callback functions for our own zone
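//
// These are the hot entry points: libmalloc dispatches every
// malloc/free-family call made against this zone through the function
// pointers installed in InitializeZone() below, and each callback simply
// forwards to the corresponding Shim* routine with a null context.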

size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
  return ShimGetSizeEstimate(ptr, nullptr);
}

void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
  return ShimMalloc(size, nullptr);
}

void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
  return ShimValloc(size, nullptr);
}

void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
  return ShimFree(ptr, nullptr);
}

void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

void MallocZoneDestroy(malloc_zone_t* zone) {
  // Destroying the zone is not supported for now.
}

void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
  return ShimMemalign(alignment, size, nullptr);
}

void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimFreeDefiniteSize(ptr, size, nullptr);
}

unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
                               size_t size,
                               void** results,
                               unsigned num_requested) {
  return ShimBatchMalloc(size, results, num_requested, nullptr);
}

void MallocZoneBatchFree(malloc_zone_t* zone,
                         void** to_be_freed,
                         unsigned num) {
  return ShimBatchFree(to_be_freed, num, nullptr);
}

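// claimed_address (zone version >= 10) is libmalloc's ownership query for a
// pointer (see malloc_zone_claimed_address()); like the callbacks above, it
// just forwards to the shim.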
boolean_t MallocZoneClaimedAddress(malloc_zone_t* zone, void* ptr) {
  return static_cast<boolean_t>(ShimClaimedAddress(ptr, nullptr));
}

#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
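// try_free_default (zone version >= 13) gives the zone a chance to handle a
// free() for a pointer it may not own, typically falling back to the original
// default zone's free rather than crashing.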
void MallocZoneTryFreeDefault(malloc_zone_t* zone, void* ptr) {
  return ShimTryFreeDefault(ptr, nullptr);
}
#endif

malloc_introspection_t g_mac_malloc_introspection{};
malloc_zone_t g_mac_malloc_zone{};

malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the
  // initial one. The default one is the first element of the default zone
  // array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}
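
// Passing a null memory_reader_t above appears to be fine when reading our
// own task (mach_task_self()): libmalloc falls back to a direct in-process
// reader in that case. The same call pattern is used in IsAlreadyRegistered()
// below.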

bool IsAlreadyRegistered() {
  // HACK: This should really only be called once, but it is not.
  //
  // This function is a static constructor of its binary. If it is included in
  // a dynamic library, then the same process may end up executing this code
  // multiple times, once per library. As a consequence, each new library will
  // add its own allocator as the default zone. Aside from splitting the heap
  // further, the main issue arises if/when the last library to be loaded
  // (dlopen()-ed) gets dlclose()-ed.
  //
  // See crbug.com/1271139 for details.
  //
  // In this case, subsequent free() will be routed by libmalloc to the
  // deleted zone (since its code has been unloaded from memory), and crash
  // inside libsystem's free(). This in practice happens as soon as dlclose()
  // is called, inside the dynamic linker (dyld).
  //
  // Since we are talking about different libraries, and issues inside the
  // dynamic linker, we cannot use a global static variable (which would be
  // per-library), or anything from pthread.
  //
  // The solution used here is to check whether the current default zone is
  // already ours, in which case we are not the first dynamic library here,
  // and should do nothing. This is racy, and hacky.
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  // *Not* using malloc_default_zone(), as it seems to be hardcoded to return
  // something other than the default zone. See the difference between
  // malloc_default_zone() and inline_malloc_default_zone() in Apple's
  // malloc.c (in libmalloc).
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  PA_MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  // Check all the zones, in case someone registered their own zone on top of
  // ours.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);

    // strcmp() rather than a pointer comparison: since the zone was
    // registered from another library, the pointers don't match.
    if (zone->zone_name &&
        (strcmp(zone->zone_name, kPartitionAllocZoneName) == 0)) {
      // This zone is provided by PartitionAlloc, so this function has been
      // called from another library (or the main executable), nothing to do.
      //
      // This should be a crash, ideally, but callers do it, so only warn, for
      // now.
      PA_RAW_LOG(ERROR,
                 "Trying to load the allocator multiple times. This is *not* "
                 "supported.");
      return true;
    }
  }

  return false;
}

void InitializeZone() {
  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
  g_mac_malloc_introspection.log = MallocIntrospectionLog;
  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
  g_mac_malloc_introspection.enable_discharge_checking =
      MallocIntrospectionEnableDischargeChecking;
  g_mac_malloc_introspection.disable_discharge_checking =
      MallocIntrospectionDisableDischargeChecking;
  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
  g_mac_malloc_introspection.enumerate_discharged_pointers =
      MallocIntrospectionEnumerateDischargedPointers;
  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
  g_mac_malloc_introspection.task_statistics =
      MallocIntrospectionTaskStatistics;
  // The `version` member indicates which APIs are supported in this zone:
  // version >= 5: memalign is supported
  // version >= 6: free_definite_size is supported
  // version >= 7: introspect's discharge family is supported
  // version >= 8: pressure_relief is supported
  // version >= 9: introspect.reinit_lock is supported
  // version >= 10: claimed_address is supported
  // version >= 11: introspect.print_task is supported
  // version >= 12: introspect.task_statistics is supported
  // version >= 13: try_free_default is supported
  g_mac_malloc_zone.version = kZoneVersion;
  g_mac_malloc_zone.zone_name = kPartitionAllocZoneName;
  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
  g_mac_malloc_zone.size = MallocZoneSize;
  g_mac_malloc_zone.malloc = MallocZoneMalloc;
  g_mac_malloc_zone.calloc = MallocZoneCalloc;
  g_mac_malloc_zone.valloc = MallocZoneValloc;
  g_mac_malloc_zone.free = MallocZoneFree;
  g_mac_malloc_zone.realloc = MallocZoneRealloc;
  g_mac_malloc_zone.destroy = MallocZoneDestroy;
  g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
  g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
  g_mac_malloc_zone.memalign = MallocZoneMemalign;
  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
  g_mac_malloc_zone.pressure_relief = nullptr;
  g_mac_malloc_zone.claimed_address = MallocZoneClaimedAddress;
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
  g_mac_malloc_zone.try_free_default = MallocZoneTryFreeDefault;
#endif
}

namespace {
static std::atomic<bool> g_initialization_is_done;
}

// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc. Since we'd like to make as much code as possible use our
// own memory allocator (and reduce bugs caused by mixed use of the system
// allocator and our own allocator), run the following function
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
//
// Note that, despite the highest priority in the initialization order,
// [NSThread init] unfortunately runs before
// InitializeDefaultMallocZoneWithPartitionAlloc and allocates memory with the
// system allocator. Plus, the allocated memory will be deallocated with the
// default zone's `free` at that moment without using a zone dispatcher.
// Hence, our own `free` function receives an address allocated by the system
// allocator.
__attribute__((constructor(0))) void
InitializeDefaultMallocZoneWithPartitionAlloc() {
  if (IsAlreadyRegistered()) {
    return;
  }

  // Instantiate the existing regular and purgeable zones in order to make the
  // existing purgeable zone use the existing regular zone, since
  // PartitionAlloc doesn't support a purgeable zone.
  std::ignore = malloc_default_zone();
  std::ignore = malloc_default_purgeable_zone();

  // Initialize the default allocator's PartitionRoot with the existing zone.
  InitializeDefaultAllocatorPartitionRoot();

  // Create our own malloc zone.
  InitializeZone();

  malloc_zone_t* system_default_zone = GetDefaultMallocZone();
  if (strcmp(system_default_zone->zone_name, kDelegatingZoneName) == 0) {
    // The first zone is our zone, we can unregister it, replacing it with the
    // new one. This relies on a precise zone setup, done in
    // |EarlyMallocZoneRegistration()|.
    malloc_zone_register(&g_mac_malloc_zone);
    malloc_zone_unregister(system_default_zone);
    g_initialization_is_done.store(true, std::memory_order_release);
    return;
  }

  // Not in the path where the zone was registered early. This is either racy,
  // or fine if the current process is not hosting multiple threads.
  //
  // This path is fine for e.g. most unit tests.
  //
  // Make our own zone the default zone.
  //
  // Put our own zone at the last position, so that it is promoted to the
  // default zone. The implementation logic of malloc_zone_unregister is:
  //   zone_table.swap(unregistered_zone, last_zone);
  //   zone_table.shrink_size_by_1();
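  //
  // A sketch of the resulting zone-table transitions (the table itself is
  // internal to libmalloc):
  //   [system, ...]          initially; zones[0] is the default zone
  //   [system, ..., ours]    after malloc_zone_register(&g_mac_malloc_zone)
  //   [ours, ...]            after malloc_zone_unregister(system): the last
  //                          zone (ours) is swapped into slot 0
  //   [ours, ..., system]    after re-registering the system zone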
  malloc_zone_register(&g_mac_malloc_zone);
  malloc_zone_unregister(system_default_zone);
  // Between malloc_zone_unregister(system_default_zone) (above) and
  // malloc_zone_register(system_default_zone) (below), i.e. while
  // system_default_zone is absent, it's possible that another thread calls
  // free(ptr), hits a "no zone found" error, and crashes the process.
  malloc_zone_register(system_default_zone);

  // Confirm that our own zone is now the default zone.
  PA_CHECK(GetDefaultMallocZone() == &g_mac_malloc_zone);
  g_initialization_is_done.store(true, std::memory_order_release);
}

}  // namespace

bool IsDefaultAllocatorPartitionRootInitialized() {
  // Even though zone registration is not thread-safe, let's not make it
  // worse, and use acquire/release ordering.
  return g_initialization_is_done.load(std::memory_order_acquire);
}

}  // namespace allocator_shim

#endif  // PARTITION_ALLOC_SHIM_ALLOCATOR_SHIM_OVERRIDE_APPLE_DEFAULT_ZONE_H_