// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/early_zone_registration_apple.h"

#include <mach/mach.h>
#include <malloc/malloc.h>
#include <string.h>

#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/shim/early_zone_registration_constants.h"

// BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT)
#error "This file cannot depend on //base"
#endif

namespace partition_alloc {

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {}

#else

extern "C" {
// abort_report_np() records the message in a special section that both the
// system CrashReporter and Crashpad collect in crash reports. See also
// chrome_exe_main_mac.cc.
void abort_report_np(const char* fmt, ...);
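// Note: the code below relies on abort_report_np() not returning:
// GetDefaultMallocZone() dereferences zones[0] unconditionally after
// calling it on failure.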
}

namespace {

malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the
  // initial one. The default one is the first element of the default zone
  // array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result = malloc_get_all_zones(
      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS) {
    abort_report_np("Cannot enumerate malloc() zones");
  }
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}

}  // namespace

void EarlyMallocZoneRegistration() {
  // Must have static storage duration, as raw pointers are passed to
  // libsystem_malloc.
  static malloc_zone_t g_delegating_zone;
  static malloc_introspection_t g_delegating_zone_introspect;
  static malloc_zone_t* g_default_zone;

  // Make sure that the default zone is instantiated.
  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();

  g_default_zone = GetDefaultMallocZone();

  // The delegating zone:
  // - Forwards all allocations to the existing default zone.
  // - Does *not* claim to own any memory, meaning that it will always be
  //   skipped in free() in libsystem_malloc.dylib.
  //
  // This is a temporary zone, until it gets replaced by PartitionAlloc, inside
  // the main library. Since the main library depends on many external
  // libraries, we cannot install PartitionAlloc as the default zone without
  // concurrency issues.
  //
  // Instead, what we do here, while the process is single-threaded, is:
  // - Register the delegating zone as the default one.
  // - Set the original (libsystem_malloc's) one as the second zone.
  //
  // Later, when PartitionAlloc initializes, we replace the default
  // (delegating) zone with ours. The end state is:
  // 1. PartitionAlloc zone
  // 2. libsystem_malloc zone

  // Set up of the delegating zone. Note that it doesn't just forward calls to
  // the default zone. This is because the system zone's malloc_zone_t pointer
  // actually points to a larger struct, containing allocator metadata. So if
  // we pass as the first parameter the "simple" delegating zone pointer, then
  // we immediately crash inside the system zone functions. So we need to
  // replace the zone pointer as well.
  //
  // Calls fall into 4 categories:
  // - Allocation calls: forwarded to the real system zone.
  // - "Is this pointer yours" calls: always answer no.
  // - free(): should never be called, but is in practice; see comments below.
  // - Diagnostics and debugging: these are typically called for every
  //   zone. They are no-ops for us, as we don't want to double-count, or lock
  //   the data structures of the real zone twice.
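  //
  // Note the pattern throughout the forwarding lambdas below: the |zone|
  // argument (which points at |g_delegating_zone|) is deliberately ignored
  // and |g_default_zone| is passed instead, so that the real zone sees its
  // own pointer and finds its metadata where it expects it.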

  // Allocation: Forward to the real zone.
  g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->malloc(g_default_zone, size);
  };
  g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
                                size_t size) {
    return g_default_zone->calloc(g_default_zone, num_items, size);
  };
  g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->valloc(g_default_zone, size);
  };
  g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
    return g_default_zone->realloc(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
                                      void** results, unsigned num_requested) {
    return g_default_zone->batch_malloc(g_default_zone, size, results,
                                        num_requested);
  };
  g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
                                  size_t size) {
    return g_default_zone->memalign(g_default_zone, alignment, size);
  };

  // Does ptr belong to this zone? Return value is != 0 if so.
  g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return 0;
  };

  // Free functions.
  // The normal path for freeing memory is:
  // 1. Try all zones in order, call zone->size(ptr).
  // 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size).
  // 3. If no zone matches, crash.
  //
  // Since this zone always returns 0 in size() (see above), zone->free()
  // should never be called. Unfortunately, this is not the case, as some
  // places in CoreFoundation call malloc_zone_free(zone, ptr) directly. So
  // rather than crashing, forward the call. It's the caller's responsibility
  // to use the same zone for free() as for the allocation (this is in the
  // contract of malloc_zone_free()).
  //
  // However, note that the sequence of calls size() -> free() is not possible
  // for this zone, as size() always returns 0.
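  //
  // Forwarding to |g_default_zone| is correct at this stage: the delegating
  // zone owns nothing, so any pointer handed to the (current) default zone
  // was ultimately allocated by the real zone, either directly or through
  // the forwarding lambdas above.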
  g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->free(g_default_zone, ptr);
  };
  g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                            size_t size) {
    return g_default_zone->free_definite_size(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
                                    unsigned num_to_be_freed) {
    return g_default_zone->batch_free(g_default_zone, to_be_freed,
                                      num_to_be_freed);
  };
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
  g_delegating_zone.try_free_default = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->try_free_default(g_default_zone, ptr);
  };
#endif

  // Diagnostics and debugging.
  //
  // Do nothing to reduce memory footprint; the real zone will do it.
  g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
                                         size_t goal) -> size_t { return 0; };

  // Introspection calls are not all optional; for instance, locking and
  // unlocking before/after fork() is mandatory.
  //
  // Nothing to enumerate.
  g_delegating_zone_introspect.enumerator =
      [](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
         memory_reader_t reader,
         vm_range_recorder_t recorder) -> kern_return_t {
    return KERN_SUCCESS;
  };
  // Need to provide a real implementation; it is used for e.g. array sizing.
  g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
                                              size_t size) {
    return g_default_zone->introspect->good_size(g_default_zone, size);
  };
  // Nothing to do.
  g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
    return true;
  };
  g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
                                          boolean_t verbose) {};
  g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
  // Do not forward the lock / unlock calls. Since the default zone is still
  // there, we should not lock here, as it would lock the zone twice (all
  // zones are locked before fork()). Rather, do nothing, since this fake
  // zone does not need any locking.
  g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
  // No stats.
  g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
                                               malloc_statistics_t* stats) {};
  // We are not locked.
  g_delegating_zone_introspect.zone_locked =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  // Don't support discharge checking.
  g_delegating_zone_introspect.enable_discharge_checking =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  g_delegating_zone_introspect.disable_discharge_checking =
      [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
                                              void* memory) {};

  // Could use something lower to support fewer functions, but this is
  // consistent with the real zone installed by PartitionAlloc.
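  // (libsystem_malloc consults |version| to decide which newer malloc_zone_t
  // fields, such as try_free_default, it may read, so the advertised version
  // has to match the fields actually populated above.)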
  g_delegating_zone.version = allocator_shim::kZoneVersion;
  g_delegating_zone.introspect = &g_delegating_zone_introspect;
  // This name is used in PartitionAlloc's initialization to determine whether
  // it should replace the delegating zone.
  g_delegating_zone.zone_name = allocator_shim::kDelegatingZoneName;

  // Register puts the new zone at the end, unregister swaps the new zone with
  // the last one.
  // The zone array is, after these lines, in order:
  // 1. |g_default_zone|...|g_delegating_zone|
  // 2. |g_delegating_zone|...| (no more default)
  // 3. |g_delegating_zone|...|g_default_zone|
  malloc_zone_register(&g_delegating_zone);  // State 1.
  malloc_zone_unregister(g_default_zone);    // State 2.
  malloc_zone_register(g_default_zone);      // State 3.

  // Make sure that the purgeable zone is after the default one.
  // Unregistering it makes |g_default_zone| take the purgeable zone's spot.
  malloc_zone_unregister(purgeable_zone);
  // Add back the purgeable zone as the last one.
  malloc_zone_register(purgeable_zone);

  // Final configuration:
  // |g_delegating_zone|...|g_default_zone|purgeable_zone|

  // Sanity check.
  if (GetDefaultMallocZone() != &g_delegating_zone) {
    abort_report_np("Failed to install the delegating zone as default.");
  }
}
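
// A sketch of the intended call sequence, for illustration only (the real
// call site is the executable's entry point, e.g. chrome_exe_main_mac.cc,
// before the main library is loaded):
//
//   int main(int argc, char** argv) {
//     partition_alloc::EarlyMallocZoneRegistration();
//     // dlopen() the main framework; PartitionAlloc replaces the delegating
//     // zone when it initializes.
//   }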

void AllowDoublePartitionAllocZoneRegistration() {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result = malloc_get_all_zones(
      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS) {
    abort_report_np("Cannot enumerate malloc() zones");
  }

  // If PartitionAlloc is one of the zones, *change* its name so that
  // registration can happen multiple times. This works because zone
  // registration only keeps a pointer to the struct; it does not copy the
  // data.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    if (zone->zone_name &&
        strcmp(zone->zone_name, allocator_shim::kPartitionAllocZoneName) ==
            0) {
      zone->zone_name = "RenamedPartitionAlloc";
      break;
    }
  }
}

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}  // namespace partition_alloc