xref: /aosp_15_r20/external/jemalloc_new/src/arena.c (revision 1208bc7e437ced7eb82efac44ba17e3beba411da)
1 #define JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/div.h"
7 #include "jemalloc/internal/extent_dss.h"
8 #include "jemalloc/internal/extent_mmap.h"
9 #include "jemalloc/internal/mutex.h"
10 #include "jemalloc/internal/rtree.h"
11 #include "jemalloc/internal/size_classes.h"
12 #include "jemalloc/internal/util.h"
13 
14 /******************************************************************************/
15 /* Data. */
16 
17 /*
18  * Define names for both uninitialized and initialized phases, so that
19  * options and mallctl processing are straightforward.
20  */
21 const char *percpu_arena_mode_names[] = {
22 	"percpu",
23 	"phycpu",
24 	"disabled",
25 	"percpu",
26 	"phycpu"
27 };
28 percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
29 
30 ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
31 ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
32 
33 static atomic_zd_t dirty_decay_ms_default;
34 static atomic_zd_t muzzy_decay_ms_default;
35 
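/*
 * Fixed-point smoothstep coefficients, one per decay step.  SMOOTHSTEP
 * expands to STEP(step, h, x, y) entries; the STEP definition below keeps
 * only the h column.
 */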
36 const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
37 #define STEP(step, h, x, y)			\
38 		h,
39 		SMOOTHSTEP
40 #undef STEP
41 };
42 
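/*
 * Per-bin division state used by arena_slab_regind() to turn a byte offset
 * into a region index without a hardware divide.  Presumably populated via
 * div_init() during arena bootstrapping (not shown in this excerpt).
 */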
43 static div_info_t arena_binind_div_info[NBINS];
44 
45 /******************************************************************************/
46 /*
47  * Function prototypes for static functions that are referenced prior to
48  * definition.
49  */
50 
51 static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
52     arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
53     size_t npages_decay_max, bool is_background_thread);
54 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
55     bool is_background_thread, bool all);
56 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
57     bin_t *bin);
58 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
59     bin_t *bin);
60 
61 /******************************************************************************/
62 
63 void
64 arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
65     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
66     size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
67 	*nthreads += arena_nthreads_get(arena, false);
68 	*dss = dss_prec_names[arena_dss_prec_get(arena)];
69 	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
70 	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
71 	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
72 	*ndirty += extents_npages_get(&arena->extents_dirty);
73 	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
74 }
75 
76 void
77 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
78     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
79     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
80     bin_stats_t *bstats, arena_stats_large_t *lstats) {
81 	cassert(config_stats);
82 
83 	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
84 	    muzzy_decay_ms, nactive, ndirty, nmuzzy);
85 
86 	size_t base_allocated, base_resident, base_mapped, metadata_thp;
87 	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
88 	    &base_mapped, &metadata_thp);
89 
90 	arena_stats_lock(tsdn, &arena->stats);
91 
92 	arena_stats_accum_zu(&astats->mapped, base_mapped
93 	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
94 #if !defined(ANDROID_MINIMIZE_STRUCTS)
95 	arena_stats_accum_zu(&astats->retained,
96 	    extents_npages_get(&arena->extents_retained) << LG_PAGE);
97 
98 	arena_stats_accum_u64(&astats->decay_dirty.npurge,
99 	    arena_stats_read_u64(tsdn, &arena->stats,
100 	    &arena->stats.decay_dirty.npurge));
101 	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
102 	    arena_stats_read_u64(tsdn, &arena->stats,
103 	    &arena->stats.decay_dirty.nmadvise));
104 	arena_stats_accum_u64(&astats->decay_dirty.purged,
105 	    arena_stats_read_u64(tsdn, &arena->stats,
106 	    &arena->stats.decay_dirty.purged));
107 
108 	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
109 	    arena_stats_read_u64(tsdn, &arena->stats,
110 	    &arena->stats.decay_muzzy.npurge));
111 	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
112 	    arena_stats_read_u64(tsdn, &arena->stats,
113 	    &arena->stats.decay_muzzy.nmadvise));
114 	arena_stats_accum_u64(&astats->decay_muzzy.purged,
115 	    arena_stats_read_u64(tsdn, &arena->stats,
116 	    &arena->stats.decay_muzzy.purged));
117 
118 	arena_stats_accum_zu(&astats->base, base_allocated);
119 	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
120 	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
121 	arena_stats_accum_zu(&astats->resident, base_resident +
122 	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
123 	    extents_npages_get(&arena->extents_dirty) +
124 	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
125 #endif
126 
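	/* Merge stats for each of the NSIZES - NBINS large size classes. */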
127 	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
128 		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
129 		    &arena->stats.lstats[i].nmalloc);
130 		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
131 #if !defined(ANDROID_MINIMIZE_STRUCTS)
132 		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
133 #endif
134 
135 		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
136 		    &arena->stats.lstats[i].ndalloc);
137 		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
138 #if !defined(ANDROID_MINIMIZE_STRUCTS)
139 		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
140 #endif
141 
142 #if !defined(ANDROID_MINIMIZE_STRUCTS)
143 		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
144 		    &arena->stats.lstats[i].nrequests);
145 		arena_stats_accum_u64(&lstats[i].nrequests,
146 		    nmalloc + nrequests);
147 		arena_stats_accum_u64(&astats->nrequests_large,
148 		    nmalloc + nrequests);
149 #endif
150 
151 #if !defined(ANDROID_MINIMIZE_STRUCTS)
152 		assert(nmalloc >= ndalloc);
153 		assert(nmalloc - ndalloc <= SIZE_T_MAX);
154 		size_t curlextents = (size_t)(nmalloc - ndalloc);
155 		lstats[i].curlextents += curlextents;
156 		arena_stats_accum_zu(&astats->allocated_large,
157 		    curlextents * sz_index2size(NBINS + i));
158 #endif
159 	}
160 
161 	arena_stats_unlock(tsdn, &arena->stats);
162 
163 #if !defined(ANDROID_MINIMIZE_STRUCTS) && defined(ANDROID_ENABLE_TCACHE)
164 	/* tcache_bytes counts currently cached bytes. */
165 	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
166 	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
167 	cache_bin_array_descriptor_t *descriptor;
168 	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
169 		szind_t i = 0;
170 		for (; i < NBINS; i++) {
171 			cache_bin_t *tbin = &descriptor->bins_small[i];
172 			arena_stats_accum_zu(&astats->tcache_bytes,
173 			    tbin->ncached * sz_index2size(i));
174 		}
175 		for (; i < nhbins; i++) {
176 			cache_bin_t *tbin = &descriptor->bins_large[i];
177 			arena_stats_accum_zu(&astats->tcache_bytes,
178 			    tbin->ncached * sz_index2size(i));
179 		}
180 	}
181 	malloc_mutex_prof_read(tsdn,
182 	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
183 	    &arena->tcache_ql_mtx);
184 	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
185 #endif
186 
187 #if !defined(ANDROID_MINIMIZE_STRUCTS)
188 #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
189     malloc_mutex_lock(tsdn, &arena->mtx);				\
190     malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
191         &arena->mtx);							\
192     malloc_mutex_unlock(tsdn, &arena->mtx);
193 
194 	/* Gather per-arena mutex profiling data. */
195 	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
196 	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
197 	    arena_prof_mutex_extent_avail)
198 	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
199 	    arena_prof_mutex_extents_dirty)
200 	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
201 	    arena_prof_mutex_extents_muzzy)
202 	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
203 	    arena_prof_mutex_extents_retained)
204 	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
205 	    arena_prof_mutex_decay_dirty)
206 	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
207 	    arena_prof_mutex_decay_muzzy)
208 	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
209 	    arena_prof_mutex_base)
210 #undef READ_ARENA_MUTEX_PROF_DATA
211 #endif
212 
213 	nstime_copy(&astats->uptime, &arena->create_time);
214 	nstime_update(&astats->uptime);
215 	nstime_subtract(&astats->uptime, &arena->create_time);
216 
217 	for (szind_t i = 0; i < NBINS; i++) {
218 		bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
219 	}
220 }
221 
222 void
223 arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
224     extent_hooks_t **r_extent_hooks, extent_t *extent) {
225 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
226 	    WITNESS_RANK_CORE, 0);
227 
228 	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
229 	    extent);
230 	if (arena_dirty_decay_ms_get(arena) == 0) {
231 		arena_decay_dirty(tsdn, arena, false, true);
232 	} else {
233 		arena_background_thread_inactivity_check(tsdn, arena, false);
234 	}
235 }
236 
237 static void *
238 arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
239 	void *ret;
240 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
241 	size_t regind;
242 
243 	assert(extent_nfree_get(slab) > 0);
244 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
245 
246 	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
247 	ret = (void *)((uintptr_t)extent_addr_get(slab) +
248 	    (uintptr_t)(bin_info->reg_size * regind));
249 	extent_nfree_dec(slab);
250 	return ret;
251 }
252 
253 #ifndef JEMALLOC_JET
254 static
255 #endif
256 size_t
257 arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
258 	size_t diff, regind;
259 
260 	/* Freeing a pointer outside the slab can cause assertion failure. */
261 	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
262 	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
263 	/* Freeing an interior pointer can cause assertion failure. */
264 	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
265 	    (uintptr_t)bin_infos[binind].reg_size == 0);
266 
267 	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
268 
269 	/* Avoid doing division with a variable divisor. */
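	/*
	 * I.e. regind = diff / bin_infos[binind].reg_size, computed with the
	 * precomputed constants in arena_binind_div_info.
	 */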
270 	regind = div_compute(&arena_binind_div_info[binind], diff);
271 
272 	assert(regind < bin_infos[binind].nregs);
273 
274 	return regind;
275 }
276 
277 static void
278 arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
279 	szind_t binind = extent_szind_get(slab);
280 	const bin_info_t *bin_info = &bin_infos[binind];
281 	size_t regind = arena_slab_regind(slab, binind, ptr);
282 
283 	assert(extent_nfree_get(slab) < bin_info->nregs);
284 	/* Freeing an unallocated pointer can cause assertion failure. */
285 	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
286 
287 	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
288 	extent_nfree_inc(slab);
289 }
290 
291 static void
292 arena_nactive_add(arena_t *arena, size_t add_pages) {
293 	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
294 }
295 
296 static void
297 arena_nactive_sub(arena_t *arena, size_t sub_pages) {
298 	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
299 	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
300 }
301 
302 static void
303 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
304 	szind_t index, hindex;
305 
306 	cassert(config_stats);
307 
308 	if (usize < LARGE_MINCLASS) {
309 		usize = LARGE_MINCLASS;
310 	}
311 	index = sz_size2index(usize);
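	/* lstats[] is indexed by large size class, relative to NBINS. */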
312 	hindex = (index >= NBINS) ? index - NBINS : 0;
313 
314 	arena_stats_add_u64(tsdn, &arena->stats,
315 	    &arena->stats.lstats[hindex].nmalloc, 1);
316 }
317 
318 static void
319 arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
320 	szind_t index, hindex;
321 
322 	cassert(config_stats);
323 
324 	if (usize < LARGE_MINCLASS) {
325 		usize = LARGE_MINCLASS;
326 	}
327 	index = sz_size2index(usize);
328 	hindex = (index >= NBINS) ? index - NBINS : 0;
329 
330 	arena_stats_add_u64(tsdn, &arena->stats,
331 	    &arena->stats.lstats[hindex].ndalloc, 1);
332 }
333 
334 static void
335 arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
336     size_t usize) {
337 	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
338 	arena_large_malloc_stats_update(tsdn, arena, usize);
339 }
340 
341 extent_t *
342 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
343     size_t alignment, bool *zero) {
344 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
345 
346 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
347 	    WITNESS_RANK_CORE, 0);
348 
349 	szind_t szind = sz_size2index(usize);
350 	size_t mapped_add;
351 	bool commit = true;
352 	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
353 	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
354 	    szind, zero, &commit);
355 	if (extent == NULL) {
356 		extent = extents_alloc(tsdn, arena, &extent_hooks,
357 		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
358 		    false, szind, zero, &commit);
359 	}
360 	size_t size = usize + sz_large_pad;
361 	if (extent == NULL) {
362 		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
363 		    usize, sz_large_pad, alignment, false, szind, zero,
364 		    &commit);
365 		if (config_stats) {
366 			/*
367 			 * extent may be NULL on OOM, but in that case
368 			 * mapped_add isn't used below, so there's no need to
369 			 * conditionally set it to 0 here.
370 			 */
371 			mapped_add = size;
372 		}
373 	} else if (config_stats) {
374 		mapped_add = 0;
375 	}
376 
377 	if (extent != NULL) {
378 		if (config_stats) {
379 			arena_stats_lock(tsdn, &arena->stats);
380 			arena_large_malloc_stats_update(tsdn, arena, usize);
381 			if (mapped_add != 0) {
382 				arena_stats_add_zu(tsdn, &arena->stats,
383 				    &arena->stats.mapped, mapped_add);
384 			}
385 			arena_stats_unlock(tsdn, &arena->stats);
386 		}
387 		arena_nactive_add(arena, size >> LG_PAGE);
388 	}
389 
390 	return extent;
391 }
392 
393 void
394 arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
395 	if (config_stats) {
396 		arena_stats_lock(tsdn, &arena->stats);
397 		arena_large_dalloc_stats_update(tsdn, arena,
398 		    extent_usize_get(extent));
399 		arena_stats_unlock(tsdn, &arena->stats);
400 	}
401 	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
402 }
403 
404 void
405 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
406     size_t oldusize) {
407 	size_t usize = extent_usize_get(extent);
408 	size_t udiff = oldusize - usize;
409 
410 	if (config_stats) {
411 		arena_stats_lock(tsdn, &arena->stats);
412 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
413 		arena_stats_unlock(tsdn, &arena->stats);
414 	}
415 	arena_nactive_sub(arena, udiff >> LG_PAGE);
416 }
417 
418 void
419 arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
420     size_t oldusize) {
421 	size_t usize = extent_usize_get(extent);
422 	size_t udiff = usize - oldusize;
423 
424 	if (config_stats) {
425 		arena_stats_lock(tsdn, &arena->stats);
426 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
427 		arena_stats_unlock(tsdn, &arena->stats);
428 	}
429 	arena_nactive_add(arena, udiff >> LG_PAGE);
430 }
431 
432 static ssize_t
433 arena_decay_ms_read(arena_decay_t *decay) {
434 	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
435 }
436 
437 static void
438 arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
439 	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
440 }
441 
442 static void
443 arena_decay_deadline_init(arena_decay_t *decay) {
444 	/*
445 	 * Generate a new deadline that is uniformly random within the next
446 	 * epoch after the current one.
447 	 */
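	/*
	 * In effect: deadline = epoch + interval, plus a uniformly distributed
	 * jitter in [0, interval) when decay is enabled.
	 */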
448 	nstime_copy(&decay->deadline, &decay->epoch);
449 	nstime_add(&decay->deadline, &decay->interval);
450 	if (arena_decay_ms_read(decay) > 0) {
451 		nstime_t jitter;
452 
453 		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
454 		    nstime_ns(&decay->interval)));
455 		nstime_add(&decay->deadline, &jitter);
456 	}
457 }
458 
459 static bool
460 arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
461 	return (nstime_compare(&decay->deadline, time) <= 0);
462 }
463 
464 static size_t
465 arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
466 	uint64_t sum;
467 	size_t npages_limit_backlog;
468 	unsigned i;
469 
470 	/*
471 	 * For each element of decay_backlog, multiply by the corresponding
472 	 * fixed-point smoothstep decay factor.  Sum the products, then divide
473 	 * to round down to the nearest whole number of pages.
474 	 */
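	/*
	 * In effect (fixed-point, with SMOOTHSTEP_BFP fractional bits):
	 *   limit = (sum_i backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP
	 */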
475 	sum = 0;
476 	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
477 		sum += decay->backlog[i] * h_steps[i];
478 	}
479 	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
480 
481 	return npages_limit_backlog;
482 }
483 
484 static void
485 arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
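	/* Pages that appeared since the previous epoch boundary, clamped at 0. */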
486 	size_t npages_delta = (current_npages > decay->nunpurged) ?
487 	    current_npages - decay->nunpurged : 0;
488 	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
489 
490 #if !defined(ANDROID_MINIMIZE_STRUCTS)
491 	if (config_debug) {
492 		if (current_npages > decay->ceil_npages) {
493 			decay->ceil_npages = current_npages;
494 		}
495 		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
496 		assert(decay->ceil_npages >= npages_limit);
497 		if (decay->ceil_npages > npages_limit) {
498 			decay->ceil_npages = npages_limit;
499 		}
500 	}
501 #endif
502 }
503 
504 static void
505 arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
506     size_t current_npages) {
507 	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
508 		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
509 		    sizeof(size_t));
510 	} else {
511 		size_t nadvance_z = (size_t)nadvance_u64;
512 
513 		assert((uint64_t)nadvance_z == nadvance_u64);
514 
515 		memmove(decay->backlog, &decay->backlog[nadvance_z],
516 		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
517 		if (nadvance_z > 1) {
518 			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
519 			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
520 		}
521 	}
522 
523 	arena_decay_backlog_update_last(decay, current_npages);
524 }
525 
526 static void
527 arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
528     extents_t *extents, size_t current_npages, size_t npages_limit,
529     bool is_background_thread) {
530 	if (current_npages > npages_limit) {
531 		arena_decay_to_limit(tsdn, arena, decay, extents, false,
532 		    npages_limit, current_npages - npages_limit,
533 		    is_background_thread);
534 	}
535 }
536 
537 static void
538 arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
539     size_t current_npages) {
540 	assert(arena_decay_deadline_reached(decay, time));
541 
542 	nstime_t delta;
543 	nstime_copy(&delta, time);
544 	nstime_subtract(&delta, &decay->epoch);
545 
546 	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
547 	assert(nadvance_u64 > 0);
548 
549 	/* Add nadvance_u64 decay intervals to epoch. */
550 	nstime_copy(&delta, &decay->interval);
551 	nstime_imultiply(&delta, nadvance_u64);
552 	nstime_add(&decay->epoch, &delta);
553 
554 	/* Set a new deadline. */
555 	arena_decay_deadline_init(decay);
556 
557 	/* Update the backlog. */
558 	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
559 }
560 
561 static void
562 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
563     extents_t *extents, const nstime_t *time, bool is_background_thread) {
564 	size_t current_npages = extents_npages_get(extents);
565 	arena_decay_epoch_advance_helper(decay, time, current_npages);
566 
567 	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
568 	/* try_purge() may drop decay->mtx; record the new state first. */
569 	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
570 	    current_npages;
571 
572 	if (!background_thread_enabled() || is_background_thread) {
573 		arena_decay_try_purge(tsdn, arena, decay, extents,
574 		    current_npages, npages_limit, is_background_thread);
575 	}
576 }
577 
578 static void
579 arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
580 	arena_decay_ms_write(decay, decay_ms);
581 	if (decay_ms > 0) {
582 		nstime_init(&decay->interval, (uint64_t)decay_ms *
583 		    KQU(1000000));
584 		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
585 	}
586 
587 	nstime_init(&decay->epoch, 0);
588 	nstime_update(&decay->epoch);
589 	decay->jitter_state = (uint64_t)(uintptr_t)decay;
590 	arena_decay_deadline_init(decay);
591 	decay->nunpurged = 0;
592 	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
593 }
594 
595 static bool
596 arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
597     arena_stats_decay_t *stats) {
598 #if !defined(ANDROID_MINIMIZE_STRUCTS)
599 	if (config_debug) {
600 		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
601 			assert(((char *)decay)[i] == 0);
602 		}
603 		decay->ceil_npages = 0;
604 	}
605 #endif
606 	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
607 	    malloc_mutex_rank_exclusive)) {
608 		return true;
609 	}
610 	decay->purging = false;
611 	arena_decay_reinit(decay, decay_ms);
612 #if !defined(ANDROID_MINIMIZE_STRUCTS)
613 	/* Memory is zeroed, so there is no need to clear stats. */
614 	if (config_stats) {
615 		decay->stats = stats;
616 	}
617 #endif
618 	return false;
619 }
620 
621 static bool
622 arena_decay_ms_valid(ssize_t decay_ms) {
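	/*
	 * Accept -1 (decay disabled) or any non-negative value small enough to
	 * be representable as an nstime (at most NSTIME_SEC_MAX * 1000 ms).
	 */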
623 	if (decay_ms < -1) {
624 		return false;
625 	}
626 	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
627 	    KQU(1000)) {
628 		return true;
629 	}
630 	return false;
631 }
632 
633 static bool
634 arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
635     extents_t *extents, bool is_background_thread) {
636 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
637 
638 	/* Purge all or nothing if the option is disabled. */
639 	/* Purge all (decay_ms == 0) or nothing (decay_ms == -1, disabled). */
640 	if (decay_ms <= 0) {
641 		if (decay_ms == 0) {
642 			arena_decay_to_limit(tsdn, arena, decay, extents, false,
643 			    0, extents_npages_get(extents),
644 			    is_background_thread);
645 		}
646 		return false;
647 	}
648 
649 	nstime_t time;
650 	nstime_init(&time, 0);
651 	nstime_update(&time);
652 	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
653 	    > 0)) {
654 		/*
655 		 * Time went backwards.  Move the epoch back in time and
656 		 * generate a new deadline, with the expectation that time
657 		 * typically flows forward for long enough periods of time that
658 		 * epochs complete.  Unfortunately, this strategy is susceptible
659 		 * to clock jitter triggering premature epoch advances, but
660 		 * clock jitter estimation and compensation isn't feasible here
661 		 * because calls into this code are event-driven.
662 		 */
663 		nstime_copy(&decay->epoch, &time);
664 		arena_decay_deadline_init(decay);
665 	} else {
666 		/* Verify that time does not go backwards. */
667 		assert(nstime_compare(&decay->epoch, &time) <= 0);
668 	}
669 
670 	/*
671 	 * If the deadline has been reached, advance to the current epoch and
672 	 * purge to the new limit if necessary.  Note that dirty pages created
673 	 * during the current epoch are not subject to purge until a future
674 	 * epoch; as a result, purging only happens during epoch advances or
675 	 * when triggered by background threads (as a scheduled event).
676 	 */
677 	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
678 	if (advance_epoch) {
679 		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
680 		    is_background_thread);
681 	} else if (is_background_thread) {
682 		arena_decay_try_purge(tsdn, arena, decay, extents,
683 		    extents_npages_get(extents),
684 		    arena_decay_backlog_npages_limit(decay),
685 		    is_background_thread);
686 	}
687 
688 	return advance_epoch;
689 }
690 
691 static ssize_t
692 arena_decay_ms_get(arena_decay_t *decay) {
693 	return arena_decay_ms_read(decay);
694 }
695 
696 ssize_t
697 arena_dirty_decay_ms_get(arena_t *arena) {
698 	return arena_decay_ms_get(&arena->decay_dirty);
699 }
700 
701 ssize_t
702 arena_muzzy_decay_ms_get(arena_t *arena) {
703 	return arena_decay_ms_get(&arena->decay_muzzy);
704 }
705 
706 static bool
707 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
708     extents_t *extents, ssize_t decay_ms) {
709 	if (!arena_decay_ms_valid(decay_ms)) {
710 		return true;
711 	}
712 
713 	malloc_mutex_lock(tsdn, &decay->mtx);
714 	/*
715 	 * Restart decay backlog from scratch, which may cause many dirty pages
716 	 * to be immediately purged.  It would conceptually be possible to map
717 	 * the old backlog onto the new backlog, but there is no justification
718 	 * for such complexity since decay_ms changes are intended to be
719 	 * infrequent, either between the {-1, 0, >0} states, or a one-time
720 	 * arbitrary change during initial arena configuration.
721 	 */
722 	arena_decay_reinit(decay, decay_ms);
723 	arena_maybe_decay(tsdn, arena, decay, extents, false);
724 	malloc_mutex_unlock(tsdn, &decay->mtx);
725 
726 	return false;
727 }
728 
729 bool
730 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
731     ssize_t decay_ms) {
732 	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
733 	    &arena->extents_dirty, decay_ms);
734 }
735 
736 bool
737 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
738     ssize_t decay_ms) {
739 	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
740 	    &arena->extents_muzzy, decay_ms);
741 }
742 
743 static size_t
744 arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
745     extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
746 	size_t npages_decay_max, extent_list_t *decay_extents) {
747 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
748 	    WITNESS_RANK_CORE, 0);
749 
750 	/* Stash extents according to npages_limit. */
751 	size_t nstashed = 0;
752 	extent_t *extent;
753 	while (nstashed < npages_decay_max &&
754 	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
755 	    npages_limit)) != NULL) {
756 		extent_list_append(decay_extents, extent);
757 		nstashed += extent_size_get(extent) >> LG_PAGE;
758 	}
759 	return nstashed;
760 }
761 
762 static size_t
763 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
764     extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
765     bool all, extent_list_t *decay_extents, bool is_background_thread) {
766 	UNUSED size_t nmadvise, nunmapped;
767 	size_t npurged;
768 
769 	if (config_stats) {
770 		nmadvise = 0;
771 		nunmapped = 0;
772 	}
773 	npurged = 0;
774 
775 	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
776 	for (extent_t *extent = extent_list_first(decay_extents); extent !=
777 	    NULL; extent = extent_list_first(decay_extents)) {
778 		if (config_stats) {
779 			nmadvise++;
780 		}
781 		size_t npages = extent_size_get(extent) >> LG_PAGE;
782 		npurged += npages;
783 		extent_list_remove(decay_extents, extent);
784 		switch (extents_state_get(extents)) {
785 		case extent_state_active:
786 			not_reached();
787 		case extent_state_dirty:
788 			if (!all && muzzy_decay_ms != 0 &&
789 			    !extent_purge_lazy_wrapper(tsdn, arena,
790 			    r_extent_hooks, extent, 0,
791 			    extent_size_get(extent))) {
792 				extents_dalloc(tsdn, arena, r_extent_hooks,
793 				    &arena->extents_muzzy, extent);
794 				arena_background_thread_inactivity_check(tsdn,
795 				    arena, is_background_thread);
796 				break;
797 			}
798 			/* Fall through. */
799 		case extent_state_muzzy:
800 			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
801 			    extent);
802 			if (config_stats) {
803 				nunmapped += npages;
804 			}
805 			break;
806 		case extent_state_retained:
807 		default:
808 			not_reached();
809 		}
810 	}
811 
812 	if (config_stats) {
813 		arena_stats_lock(tsdn, &arena->stats);
814 #if !defined(ANDROID_MINIMIZE_STRUCTS)
815 		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
816 		    1);
817 		arena_stats_add_u64(tsdn, &arena->stats,
818 		    &decay->stats->nmadvise, nmadvise);
819 		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
820 		    npurged);
821 #endif
822 		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
823 		    nunmapped << LG_PAGE);
824 		arena_stats_unlock(tsdn, &arena->stats);
825 	}
826 
827 	return npurged;
828 }
829 
830 /*
831  * Decay at most npages_decay_max pages without violating the invariant
832  * (extents_npages_get(extents) >= npages_limit).  An upper bound on the
833  * number of pages to decay is needed to prevent unbounded growth of the
834  * stash; otherwise new pages could keep being added to extents during the
835  * current decay run and the purging thread would never finish.
836  */
837 static void
838 arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
839     extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
840     bool is_background_thread) {
841 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
842 	    WITNESS_RANK_CORE, 1);
843 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
844 
845 	if (decay->purging) {
846 		return;
847 	}
848 	decay->purging = true;
849 	malloc_mutex_unlock(tsdn, &decay->mtx);
850 
851 	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
852 
853 	extent_list_t decay_extents;
854 	extent_list_init(&decay_extents);
855 
856 	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
857 	    npages_limit, npages_decay_max, &decay_extents);
858 	if (npurge != 0) {
859 		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
860 		    &extent_hooks, decay, extents, all, &decay_extents,
861 		    is_background_thread);
862 		assert(npurged == npurge);
863 	}
864 
865 	malloc_mutex_lock(tsdn, &decay->mtx);
866 	decay->purging = false;
867 }
868 
869 static bool
870 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
871     extents_t *extents, bool is_background_thread, bool all) {
872 	if (all) {
873 		malloc_mutex_lock(tsdn, &decay->mtx);
874 		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
875 		    extents_npages_get(extents), is_background_thread);
876 		malloc_mutex_unlock(tsdn, &decay->mtx);
877 
878 		return false;
879 	}
880 
881 	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
882 		/* No need to wait if another thread is in progress. */
883 		return true;
884 	}
885 
886 	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
887 	    is_background_thread);
888 	UNUSED size_t npages_new;
889 	if (epoch_advanced) {
890 		/* Backlog is updated on epoch advance. */
891 		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
892 	}
893 	malloc_mutex_unlock(tsdn, &decay->mtx);
894 
895 	if (have_background_thread && background_thread_enabled() &&
896 	    epoch_advanced && !is_background_thread) {
897 		background_thread_interval_check(tsdn, arena, decay,
898 		    npages_new);
899 	}
900 
901 	return false;
902 }
903 
904 static bool
905 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
906     bool all) {
907 	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
908 	    &arena->extents_dirty, is_background_thread, all);
909 }
910 
911 static bool
912 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
913     bool all) {
914 	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
915 	    &arena->extents_muzzy, is_background_thread, all);
916 }
917 
918 void
919 arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
920 	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
921 		return;
922 	}
923 	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
924 }
925 
926 static void
927 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
928 	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
929 
930 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
931 	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
932 }
933 
934 static void
935 arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
936 	assert(extent_nfree_get(slab) > 0);
937 	extent_heap_insert(&bin->slabs_nonfull, slab);
938 }
939 
940 static void
941 arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
942 	extent_heap_remove(&bin->slabs_nonfull, slab);
943 }
944 
945 static extent_t *
946 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
947 	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
948 	if (slab == NULL) {
949 		return NULL;
950 	}
951 #if !defined(ANDROID_MINIMIZE_STRUCTS)
952 	if (config_stats) {
953 		bin->stats.reslabs++;
954 	}
955 #endif
956 	return slab;
957 }
958 
959 static void
960 arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
961 	assert(extent_nfree_get(slab) == 0);
962 	/*
963 	 * Tracking extents is required by arena_reset, which is not allowed
964 	 * for auto arenas.  Bypass this step for auto arenas to avoid touching
965 	 * the extent linkage, which often results in cache misses.
966 	 */
967 	if (arena_is_auto(arena)) {
968 		return;
969 	}
970 	extent_list_append(&bin->slabs_full, slab);
971 }
972 
973 static void
974 arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
975 	if (arena_is_auto(arena)) {
976 		return;
977 	}
978 	extent_list_remove(&bin->slabs_full, slab);
979 }
980 
981 void
982 arena_reset(tsd_t *tsd, arena_t *arena) {
983 	/*
984 	 * Locking in this function is unintuitive.  The caller guarantees that
985 	 * no concurrent operations are happening in this arena, but there are
986 	 * still reasons that some locking is necessary:
987 	 *
988 	 * - Some of the functions in the transitive closure of calls assume
989 	 *   appropriate locks are held, and in some cases these locks are
990 	 *   temporarily dropped to avoid lock order reversal or deadlock due to
991 	 *   reentry.
992 	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
993 	 *   strictly speaking this is a "concurrent operation", disallowing
994 	 *   stats refreshes would impose an inconvenient burden.
995 	 */
996 
997 	/* Large allocations. */
998 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
999 
1000 	for (extent_t *extent = extent_list_first(&arena->large); extent !=
1001 	    NULL; extent = extent_list_first(&arena->large)) {
1002 		void *ptr = extent_base_get(extent);
1003 		size_t usize;
1004 
1005 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
1006 		alloc_ctx_t alloc_ctx;
1007 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
1008 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
1009 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
1010 		assert(alloc_ctx.szind != NSIZES);
1011 
1012 		if (config_stats || (config_prof && opt_prof)) {
1013 			usize = sz_index2size(alloc_ctx.szind);
1014 			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
1015 		}
1016 		/* Remove large allocation from prof sample set. */
1017 		if (config_prof && opt_prof) {
1018 			prof_free(tsd, ptr, usize, &alloc_ctx);
1019 		}
1020 		large_dalloc(tsd_tsdn(tsd), extent);
1021 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
1022 	}
1023 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
1024 
1025 	/* Bins. */
1026 	for (unsigned i = 0; i < NBINS; i++) {
1027 		extent_t *slab;
1028 		bin_t *bin = &arena->bins[i];
1029 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1030 		if (bin->slabcur != NULL) {
1031 			slab = bin->slabcur;
1032 			bin->slabcur = NULL;
1033 			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1034 			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1035 			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1036 		}
1037 		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
1038 		    NULL) {
1039 			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1040 			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1041 			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1042 		}
1043 		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
1044 		    slab = extent_list_first(&bin->slabs_full)) {
1045 			arena_bin_slabs_full_remove(arena, bin, slab);
1046 			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1047 			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
1048 			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1049 		}
1050 		if (config_stats) {
1051 			bin->stats.curregs = 0;
1052 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1053 			bin->stats.curslabs = 0;
1054 #endif
1055 		}
1056 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1057 	}
1058 
1059 	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
1060 }
1061 
1062 static void
1063 arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
1064 	/*
1065 	 * Iterate over the retained extents and destroy them.  This gives the
1066 	 * extent allocator underlying the extent hooks an opportunity to unmap
1067 	 * all retained memory without having to keep its own metadata
1068 	 * structures.  In practice, virtual memory for dss-allocated extents is
1069 	 * leaked here, so best practice is to avoid dss for arenas to be
1070 	 * destroyed, or provide custom extent hooks that track retained
1071 	 * dss-based extents for later reuse.
1072 	 */
1073 	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
1074 	extent_t *extent;
1075 	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
1076 	    &arena->extents_retained, 0)) != NULL) {
1077 		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
1078 	}
1079 }
1080 
1081 void
1082 arena_destroy(tsd_t *tsd, arena_t *arena) {
1083 	assert(base_ind_get(arena->base) >= narenas_auto);
1084 	assert(arena_nthreads_get(arena, false) == 0);
1085 	assert(arena_nthreads_get(arena, true) == 0);
1086 
1087 	/*
1088 	 * No allocations have occurred since arena_reset() was called.
1089 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
1090 	 * extents, so only retained extents may remain.
1091 	 */
1092 	assert(extents_npages_get(&arena->extents_dirty) == 0);
1093 	assert(extents_npages_get(&arena->extents_muzzy) == 0);
1094 
1095 	/* Deallocate retained memory. */
1096 	arena_destroy_retained(tsd_tsdn(tsd), arena);
1097 
1098 	/*
1099 	 * Remove the arena pointer from the arenas array.  We rely on the fact
1100 	 * that there is no way for the application to get a dirty read from the
1101 	 * arenas array unless there is an inherent race in the application
1102 	 * involving access of an arena being concurrently destroyed.  The
1103 	 * application must synchronize knowledge of the arena's validity, so as
1104 	 * long as we use an atomic write to update the arenas array, the
1105 	 * application will get a clean read any time after it synchronizes
1106 	 * knowledge that the arena is no longer valid.
1107 	 */
1108 	arena_set(base_ind_get(arena->base), NULL);
1109 
1110 	/*
1111 	 * Destroy the base allocator, which manages all metadata ever mapped by
1112 	 * this arena.
1113 	 */
1114 	base_delete(tsd_tsdn(tsd), arena->base);
1115 }
1116 
1117 static extent_t *
1118 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
1119     extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
1120     szind_t szind) {
1121 	extent_t *slab;
1122 	bool zero, commit;
1123 
1124 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1125 	    WITNESS_RANK_CORE, 0);
1126 
1127 	zero = false;
1128 	commit = true;
1129 	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
1130 	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
1131 
1132 	if (config_stats && slab != NULL) {
1133 		arena_stats_mapped_add(tsdn, &arena->stats,
1134 		    bin_info->slab_size);
1135 	}
1136 
1137 	return slab;
1138 }
1139 
1140 static extent_t *
1141 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
1142     const bin_info_t *bin_info) {
1143 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1144 	    WITNESS_RANK_CORE, 0);
1145 
1146 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1147 	szind_t szind = sz_size2index(bin_info->reg_size);
1148 	bool zero = false;
1149 	bool commit = true;
1150 	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
1151 	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
1152 	    binind, &zero, &commit);
1153 	if (slab == NULL) {
1154 		slab = extents_alloc(tsdn, arena, &extent_hooks,
1155 		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
1156 		    true, binind, &zero, &commit);
1157 	}
1158 	if (slab == NULL) {
1159 		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
1160 		    bin_info, szind);
1161 		if (slab == NULL) {
1162 			return NULL;
1163 		}
1164 	}
1165 	assert(extent_slab_get(slab));
1166 
1167 	/* Initialize slab internals. */
1168 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1169 	extent_nfree_set(slab, bin_info->nregs);
1170 	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
1171 
1172 	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
1173 
1174 	return slab;
1175 }
1176 
1177 static extent_t *
1178 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1179     szind_t binind) {
1180 	extent_t *slab;
1181 	const bin_info_t *bin_info;
1182 
1183 	/* Look for a usable slab. */
1184 	slab = arena_bin_slabs_nonfull_tryget(bin);
1185 	if (slab != NULL) {
1186 		return slab;
1187 	}
1188 	/* No existing slabs have any space available. */
1189 
1190 	bin_info = &bin_infos[binind];
1191 
1192 	/* Allocate a new slab. */
1193 	malloc_mutex_unlock(tsdn, &bin->lock);
1194 	/******************************/
1195 	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
1196 	/********************************/
1197 	malloc_mutex_lock(tsdn, &bin->lock);
1198 	if (slab != NULL) {
1199 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1200 		if (config_stats) {
1201 			bin->stats.nslabs++;
1202 			bin->stats.curslabs++;
1203 		}
1204 #endif
1205 		return slab;
1206 	}
1207 
1208 	/*
1209 	 * arena_slab_alloc() failed, but another thread may have made
1210 	 * sufficient memory available while this one dropped bin->lock above,
1211 	 * so search one more time.
1212 	 */
1213 	slab = arena_bin_slabs_nonfull_tryget(bin);
1214 	if (slab != NULL) {
1215 		return slab;
1216 	}
1217 
1218 	return NULL;
1219 }
1220 
1221 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
1222 static void *
1223 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
1224     szind_t binind) {
1225 	const bin_info_t *bin_info;
1226 	extent_t *slab;
1227 
1228 	bin_info = &bin_infos[binind];
1229 	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
1230 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1231 		bin->slabcur = NULL;
1232 	}
1233 	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
1234 	if (bin->slabcur != NULL) {
1235 		/*
1236 		 * Another thread updated slabcur while this one ran without the
1237 		 * bin lock in arena_bin_nonfull_slab_get().
1238 		 */
1239 		if (extent_nfree_get(bin->slabcur) > 0) {
1240 			void *ret = arena_slab_reg_alloc(bin->slabcur,
1241 			    bin_info);
1242 			if (slab != NULL) {
1243 				/*
1244 				 * arena_slab_alloc() may have allocated slab,
1245 				 * or it may have been pulled from
1246 				 * slabs_nonfull.  Therefore it is unsafe to
1247 				 * make any assumptions about how slab has
1248 				 * previously been used, and
1249 				 * arena_bin_lower_slab() must be called, as if
1250 				 * a region were just deallocated from the slab.
1251 				 */
1252 				if (extent_nfree_get(slab) == bin_info->nregs) {
1253 					arena_dalloc_bin_slab(tsdn, arena, slab,
1254 					    bin);
1255 				} else {
1256 					arena_bin_lower_slab(tsdn, arena, slab,
1257 					    bin);
1258 				}
1259 			}
1260 			return ret;
1261 		}
1262 
1263 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1264 		bin->slabcur = NULL;
1265 	}
1266 
1267 	if (slab == NULL) {
1268 		return NULL;
1269 	}
1270 	bin->slabcur = slab;
1271 
1272 	assert(extent_nfree_get(bin->slabcur) > 0);
1273 
1274 	return arena_slab_reg_alloc(slab, bin_info);
1275 }
1276 
1277 void
1278 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
1279     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
1280 	unsigned i, nfill;
1281 	bin_t *bin;
1282 
1283 	assert(tbin->ncached == 0);
1284 
1285 	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
1286 		prof_idump(tsdn);
1287 	}
1288 	bin = &arena->bins[binind];
1289 	malloc_mutex_lock(tsdn, &bin->lock);
1290 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1291 	    tcache->lg_fill_div[binind]); i < nfill; i++) {
1292 		extent_t *slab;
1293 		void *ptr;
1294 		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
1295 		    0) {
1296 			ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1297 		} else {
1298 			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1299 		}
1300 		if (ptr == NULL) {
1301 			/*
1302 			 * OOM.  tbin->avail isn't yet filled down to its first
1303 			 * element, so any successful allocations must be moved
1304 			 * to just below tbin->avail before bailing out.
1305 			 */
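			/*
			 * E.g. with nfill == 4 and i == 2, the two pointers at
			 * avail[-4] and avail[-3] are shifted up to avail[-2]
			 * and avail[-1], directly below tbin->avail.
			 */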
1306 			if (i > 0) {
1307 				memmove(tbin->avail - i, tbin->avail - nfill,
1308 				    i * sizeof(void *));
1309 			}
1310 			break;
1311 		}
1312 		if (config_fill && unlikely(opt_junk_alloc)) {
1313 			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
1314 		}
1315 		/* Insert such that low regions get used first. */
1316 		*(tbin->avail - nfill + i) = ptr;
1317 	}
1318 	if (config_stats) {
1319 		bin->stats.nmalloc += i;
1320 #if defined(ANDROID_ENABLE_TCACHE_STATS)
1321 		bin->stats.nrequests += tbin->tstats.nrequests;
1322 #endif
1323 		bin->stats.curregs += i;
1324 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1325 		bin->stats.nfills++;
1326 #endif
1327 #if defined(ANDROID_ENABLE_TCACHE_STATS)
1328 		tbin->tstats.nrequests = 0;
1329 #endif
1330 	}
1331 	malloc_mutex_unlock(tsdn, &bin->lock);
1332 	tbin->ncached = i;
1333 	arena_decay_tick(tsdn, arena);
1334 }
1335 
1336 void
1337 arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
1338 	if (!zero) {
1339 		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
1340 	}
1341 }
1342 
1343 static void
1344 arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
1345 	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
1346 }
1347 arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
1348     arena_dalloc_junk_small_impl;
1349 
1350 static void *
1351 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
1352 	void *ret;
1353 	bin_t *bin;
1354 	size_t usize;
1355 	extent_t *slab;
1356 
1357 	assert(binind < NBINS);
1358 	bin = &arena->bins[binind];
1359 	usize = sz_index2size(binind);
1360 
1361 	malloc_mutex_lock(tsdn, &bin->lock);
1362 	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
1363 		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
1364 	} else {
1365 		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
1366 	}
1367 
1368 	if (ret == NULL) {
1369 		malloc_mutex_unlock(tsdn, &bin->lock);
1370 		return NULL;
1371 	}
1372 
1373 	if (config_stats) {
1374 		bin->stats.nmalloc++;
1375 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1376 		bin->stats.nrequests++;
1377 #endif
1378 		bin->stats.curregs++;
1379 	}
1380 	malloc_mutex_unlock(tsdn, &bin->lock);
1381 	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
1382 		prof_idump(tsdn);
1383 	}
1384 
1385 	if (!zero) {
1386 		if (config_fill) {
1387 			if (unlikely(opt_junk_alloc)) {
1388 				arena_alloc_junk_small(ret,
1389 				    &bin_infos[binind], false);
1390 			} else if (unlikely(opt_zero)) {
1391 				memset(ret, 0, usize);
1392 			}
1393 		}
1394 	} else {
1395 		if (config_fill && unlikely(opt_junk_alloc)) {
1396 			arena_alloc_junk_small(ret, &bin_infos[binind],
1397 			    true);
1398 		}
1399 		memset(ret, 0, usize);
1400 	}
1401 
1402 	arena_decay_tick(tsdn, arena);
1403 	return ret;
1404 }
1405 
1406 void *
1407 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
1408     bool zero) {
1409 	assert(!tsdn_null(tsdn) || arena != NULL);
1410 
1411 	if (likely(!tsdn_null(tsdn))) {
1412 		arena = arena_choose(tsdn_tsd(tsdn), arena);
1413 	}
1414 	if (unlikely(arena == NULL)) {
1415 		return NULL;
1416 	}
1417 
1418 	if (likely(size <= SMALL_MAXCLASS)) {
1419 		return arena_malloc_small(tsdn, arena, ind, zero);
1420 	}
1421 	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
1422 }
1423 
1424 void *
1425 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
1426     bool zero, tcache_t *tcache) {
1427 	void *ret;
1428 
1429 	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
1430 	    && (usize & PAGE_MASK) == 0))) {
1431 		/* Small; alignment doesn't require special slab placement. */
1432 		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1433 		    zero, tcache, true);
1434 	} else {
1435 		if (likely(alignment <= CACHELINE)) {
1436 			ret = large_malloc(tsdn, arena, usize, zero);
1437 		} else {
1438 			ret = large_palloc(tsdn, arena, usize, alignment, zero);
1439 		}
1440 	}
1441 	return ret;
1442 }
1443 
1444 void
1445 arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
1446 	cassert(config_prof);
1447 	assert(ptr != NULL);
1448 	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
1449 	assert(usize <= SMALL_MAXCLASS);
1450 
1451 	rtree_ctx_t rtree_ctx_fallback;
1452 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1453 
1454 	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1455 	    (uintptr_t)ptr, true);
1456 	arena_t *arena = extent_arena_get(extent);
1457 
1458 	szind_t szind = sz_size2index(usize);
1459 	extent_szind_set(extent, szind);
1460 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1461 	    szind, false);
1462 
1463 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1464 	prof_accum_cancel(tsdn, &arena->prof_accum, usize);
1465 #endif
1466 
1467 	assert(isalloc(tsdn, ptr) == usize);
1468 }
1469 
1470 static size_t
1471 arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
1472 	cassert(config_prof);
1473 	assert(ptr != NULL);
1474 
1475 	extent_szind_set(extent, NBINS);
1476 	rtree_ctx_t rtree_ctx_fallback;
1477 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1478 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1479 	    NBINS, false);
1480 
1481 	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
1482 
1483 	return LARGE_MINCLASS;
1484 }
1485 
1486 void
1487 arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
1488     bool slow_path) {
1489 	cassert(config_prof);
1490 	assert(opt_prof);
1491 
1492 	extent_t *extent = iealloc(tsdn, ptr);
1493 	size_t usize = arena_prof_demote(tsdn, extent, ptr);
1494 	if (usize <= tcache_maxclass) {
1495 		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1496 		    sz_size2index(usize), slow_path);
1497 	} else {
1498 		large_dalloc(tsdn, extent);
1499 	}
1500 }
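/*
 * Illustrative note: after demotion the extent's usable size is
 * LARGE_MINCLASS again, so the free is routed like any other large
 * deallocation -- through the tcache's large bins when LARGE_MINCLASS is no
 * larger than tcache_maxclass, otherwise straight to large_dalloc().
 */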
1501 
1502 static void
1503 arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
1504 	/* Dissociate slab from bin. */
1505 	if (slab == bin->slabcur) {
1506 		bin->slabcur = NULL;
1507 	} else {
1508 		szind_t binind = extent_szind_get(slab);
1509 		const bin_info_t *bin_info = &bin_infos[binind];
1510 
1511 		/*
1512 		 * The following block's conditional is necessary because if the
1513 		 * slab only contains one region, then it never gets inserted
1514 		 * into the non-full slabs heap.
1515 		 */
1516 		if (bin_info->nregs == 1) {
1517 			arena_bin_slabs_full_remove(arena, bin, slab);
1518 		} else {
1519 			arena_bin_slabs_nonfull_remove(bin, slab);
1520 		}
1521 	}
1522 }
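/*
 * Illustrative example of the special case above: with nregs == 1 a slab
 * flips straight between "completely full" and "completely empty", so it is
 * only ever tracked as slabcur or in the full-slabs list and must be removed
 * from there; it never appears in the nonfull heap.
 */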
1523 
1524 static void
1525 arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1526     bin_t *bin) {
1527 	assert(slab != bin->slabcur);
1528 
1529 	malloc_mutex_unlock(tsdn, &bin->lock);
1530 	/******************************/
1531 	arena_slab_dalloc(tsdn, arena, slab);
1532 	/****************************/
1533 	malloc_mutex_lock(tsdn, &bin->lock);
1534 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1535 	if (config_stats) {
1536 		bin->stats.curslabs--;
1537 	}
1538 #endif
1539 }
1540 
1541 static void
1542 arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1543     bin_t *bin) {
1544 	assert(extent_nfree_get(slab) > 0);
1545 
1546 	/*
1547 	 * Make sure that if bin->slabcur is non-NULL, it refers to the
1548 	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
1549 	 * than proactively keeping it pointing at the oldest/lowest non-full
1550 	 * slab.
1551 	 */
1552 	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
1553 		/* Switch slabcur. */
1554 		if (extent_nfree_get(bin->slabcur) > 0) {
1555 			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
1556 		} else {
1557 			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1558 		}
1559 		bin->slabcur = slab;
1560 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1561 		if (config_stats) {
1562 			bin->stats.reslabs++;
1563 		}
1564 #endif
1565 	} else {
1566 		arena_bin_slabs_nonfull_insert(bin, slab);
1567 	}
1568 }
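/*
 * Illustrative note: extent_snad_comp() orders slabs by (serial number,
 * address), so the check above keeps slabcur pointing at the oldest/lowest
 * candidate.  The likely intent (hedged; only the ordering itself is
 * enforced by this code) is the usual first-fit bias: funnel freed regions
 * back into long-lived slabs so newer slabs can empty out and be released.
 */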
1569 
1570 static void
1571 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
1572     void *ptr, bool junked) {
1573 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
1574 	szind_t binind = extent_szind_get(slab);
1575 	bin_t *bin = &arena->bins[binind];
1576 	const bin_info_t *bin_info = &bin_infos[binind];
1577 
1578 	if (!junked && config_fill && unlikely(opt_junk_free)) {
1579 		arena_dalloc_junk_small(ptr, bin_info);
1580 	}
1581 
1582 	arena_slab_reg_dalloc(slab, slab_data, ptr);
1583 	unsigned nfree = extent_nfree_get(slab);
1584 	if (nfree == bin_info->nregs) {
1585 		arena_dissociate_bin_slab(arena, slab, bin);
1586 		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
1587 	} else if (nfree == 1 && slab != bin->slabcur) {
1588 		arena_bin_slabs_full_remove(arena, bin, slab);
1589 		arena_bin_lower_slab(tsdn, arena, slab, bin);
1590 	}
1591 
1592 	if (config_stats) {
1593 		bin->stats.ndalloc++;
1594 		bin->stats.curregs--;
1595 	}
1596 }
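/*
 * Illustrative summary of the per-free state transitions above:
 *   nfree == nregs           -> slab is now empty: dissociate it from the
 *                               bin and return it to the arena.
 *   nfree == 1, != slabcur   -> slab just left the full set: remove it from
 *                               the full list and reinsert it as nonfull (or
 *                               as slabcur) via arena_bin_lower_slab().
 *   otherwise                -> slab stays where it is.
 */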
1597 
1598 void
1599 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
1600     void *ptr) {
1601 	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
1602 }
1603 
1604 static void
1605 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
1606 	szind_t binind = extent_szind_get(extent);
1607 	bin_t *bin = &arena->bins[binind];
1608 
1609 	malloc_mutex_lock(tsdn, &bin->lock);
1610 	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
1611 	malloc_mutex_unlock(tsdn, &bin->lock);
1612 }
1613 
1614 void
1615 arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
1616 	extent_t *extent = iealloc(tsdn, ptr);
1617 	arena_t *arena = extent_arena_get(extent);
1618 
1619 	arena_dalloc_bin(tsdn, arena, extent, ptr);
1620 	arena_decay_tick(tsdn, arena);
1621 }
1622 
1623 bool
1624 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
1625     size_t extra, bool zero) {
1626 	/* Calls with non-zero extra had to clamp extra. */
1627 	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
1628 
1629 	if (unlikely(size > LARGE_MAXCLASS)) {
1630 		return true;
1631 	}
1632 
1633 	extent_t *extent = iealloc(tsdn, ptr);
1634 	size_t usize_min = sz_s2u(size);
1635 	size_t usize_max = sz_s2u(size + extra);
1636 	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
1637 		/*
1638 		 * Avoid moving the allocation if the size class can be left the
1639 		 * same.
1640 		 */
1641 		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
1642 		    oldsize);
1643 		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
1644 		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
1645 		    oldsize)) {
1646 			return true;
1647 		}
1648 
1649 		arena_decay_tick(tsdn, extent_arena_get(extent));
1650 		return false;
1651 	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
1652 		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
1653 		    zero);
1654 	}
1655 
1656 	return true;
1657 }
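/*
 * Illustrative worked example for the small-size branch above, assuming the
 * typical 16-byte-quantum size classes (exact classes are build-dependent):
 *   oldsize=48, size=40, extra=0 -> sz_s2u(40) == 48, same class, so the
 *                                   resize succeeds in place (returns false).
 *   oldsize=48, size=80, extra=0 -> the class changes and size > oldsize, so
 *                                   the caller must move (returns true).
 * Large-to-large resizes are delegated to large_ralloc_no_move(); resizes
 * that cross the small/large boundary generally report "must move".
 */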
1658 
1659 static void *
1660 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
1661     size_t alignment, bool zero, tcache_t *tcache) {
1662 	if (alignment == 0) {
1663 		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
1664 		    zero, tcache, true);
1665 	}
1666 	usize = sz_sa2u(usize, alignment);
1667 	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1668 		return NULL;
1669 	}
1670 	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
1671 }
1672 
1673 void *
1674 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
1675     size_t size, size_t alignment, bool zero, tcache_t *tcache) {
1676 	size_t usize = sz_s2u(size);
1677 	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
1678 		return NULL;
1679 	}
1680 
1681 	if (likely(usize <= SMALL_MAXCLASS)) {
1682 		/* Try to avoid moving the allocation. */
1683 		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
1684 			return ptr;
1685 		}
1686 	}
1687 
1688 	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
1689 		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
1690 		    alignment, zero, tcache);
1691 	}
1692 
1693 	/*
1694 	 * size and oldsize are different enough that we need to move the
1695 	 * object.  In that case, fall back to allocating new space and copying.
1696 	 */
1697 	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
1698 	    zero, tcache);
1699 	if (ret == NULL) {
1700 		return NULL;
1701 	}
1702 
1703 	/*
1704 	 * Junk/zero-filling were already done by
1705 	 * ipalloc()/arena_malloc().
1706 	 */
1707 
1708 	size_t copysize = (usize < oldsize) ? usize : oldsize;
1709 	memcpy(ret, ptr, copysize);
1710 	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
1711 	return ret;
1712 }
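/*
 * Illustrative sketch of the ownership contract above (hypothetical caller):
 *
 *	void *q = arena_ralloc(tsdn, NULL, p, old_usize, new_size, 0, false,
 *	    tcache);
 *	if (q == NULL)      p is untouched and still valid;
 *	else if (q == p)    the resize happened in place;
 *	else                p has already been freed via isdalloct().
 */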
1713 
1714 dss_prec_t
1715 arena_dss_prec_get(arena_t *arena) {
1716 	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
1717 }
1718 
1719 bool
1720 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
1721 	if (!have_dss) {
1722 		return (dss_prec != dss_prec_disabled);
1723 	}
1724 	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
1725 	return false;
1726 }
1727 
1728 ssize_t
1729 arena_dirty_decay_ms_default_get(void) {
1730 	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
1731 }
1732 
1733 bool
1734 arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
1735 	if (!arena_decay_ms_valid(decay_ms)) {
1736 		return true;
1737 	}
1738 	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
1739 	return false;
1740 }
1741 
1742 ssize_t
1743 arena_muzzy_decay_ms_default_get(void) {
1744 	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
1745 }
1746 
1747 bool
1748 arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
1749 	if (!arena_decay_ms_valid(decay_ms)) {
1750 		return true;
1751 	}
1752 	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
1753 	return false;
1754 }
1755 
1756 bool
1757 arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
1758     size_t *new_limit) {
1759 	assert(opt_retain);
1760 
1761 	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
1762 	if (new_limit != NULL) {
1763 		size_t limit = *new_limit;
1764 		/* Grow no more than the new limit. */
1765 		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
1766 		     EXTENT_GROW_MAX_PIND) {
1767 			return true;
1768 		}
1769 	}
1770 
1771 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
1772 	if (old_limit != NULL) {
1773 		*old_limit = sz_pind2sz(arena->retain_grow_limit);
1774 	}
1775 	if (new_limit != NULL) {
1776 		arena->retain_grow_limit = new_ind;
1777 	}
1778 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
1779 
1780 	return false;
1781 }
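/*
 * Illustrative note: "sz_psz2ind(limit + 1) - 1" rounds the requested limit
 * down to the largest supported page-size class that does not exceed it.
 * The stored limit is a pszind_t index rather than a byte count, which is
 * why the old value is converted back with sz_pind2sz() before being
 * reported to the caller.
 */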
1782 
1783 unsigned
1784 arena_nthreads_get(arena_t *arena, bool internal) {
1785 	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
1786 }
1787 
1788 void
1789 arena_nthreads_inc(arena_t *arena, bool internal) {
1790 	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1791 }
1792 
1793 void
1794 arena_nthreads_dec(arena_t *arena, bool internal) {
1795 	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1796 }
1797 
1798 size_t
1799 arena_extent_sn_next(arena_t *arena) {
1800 	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
1801 }
1802 
1803 arena_t *
1804 arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
1805 	arena_t *arena;
1806 	base_t *base;
1807 	unsigned i;
1808 
1809 	if (ind == 0) {
1810 		base = b0get();
1811 	} else {
1812 		base = base_new(tsdn, ind, extent_hooks);
1813 		if (base == NULL) {
1814 			return NULL;
1815 		}
1816 	}
1817 
1818 	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
1819 	if (arena == NULL) {
1820 		goto label_error;
1821 	}
1822 
1823 	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
1824 	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
1825 	arena->last_thd = NULL;
1826 
1827 	if (config_stats) {
1828 		if (arena_stats_init(tsdn, &arena->stats)) {
1829 			goto label_error;
1830 		}
1831 
1832 #if defined(ANDROID_ENABLE_TCACHE)
1833 		ql_new(&arena->tcache_ql);
1834 		ql_new(&arena->cache_bin_array_descriptor_ql);
1835 		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
1836 		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
1837 			goto label_error;
1838 		}
1839 #endif
1840 	}
1841 
1842 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1843 	if (config_prof) {
1844 		if (prof_accum_init(tsdn, &arena->prof_accum)) {
1845 			goto label_error;
1846 		}
1847 	}
1848 #endif
1849 
1850 	if (config_cache_oblivious) {
1851 		/*
1852 		 * A nondeterministic seed based on the address of arena reduces
1853 		 * the likelihood of lockstep non-uniform cache index
1854 		 * utilization among identical concurrent processes, but at the
1855 		 * cost of test repeatability.  For debug builds, instead use a
1856 		 * deterministic seed.
1857 		 */
1858 		atomic_store_zu(&arena->offset_state, config_debug ? ind :
1859 		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
1860 	}
1861 
1862 	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
1863 
1864 	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
1865 	    ATOMIC_RELAXED);
1866 
1867 	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
1868 
1869 	extent_list_init(&arena->large);
1870 	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
1871 	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
1872 		goto label_error;
1873 	}
1874 
1875 	/*
1876 	 * Delay coalescing for dirty extents despite the disruptive effect on
1877 	 * memory layout for best-fit extent allocation, since cached extents
1878 	 * are likely to be reused soon after deallocation, and the cost of
1879 	 * merging/splitting extents is non-trivial.
1880 	 */
1881 	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
1882 	    true)) {
1883 		goto label_error;
1884 	}
1885 	/*
1886 	 * Coalesce muzzy extents immediately, because operations on them are in
1887 	 * the critical path much less often than for dirty extents.
1888 	 */
1889 	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
1890 	    false)) {
1891 		goto label_error;
1892 	}
1893 	/*
1894 	 * Coalesce retained extents immediately, in part because they will
1895 	 * never be evicted (and therefore there's no opportunity for delayed
1896 	 * coalescing), but also because operations on retained extents are not
1897 	 * in the critical path.
1898 	 */
1899 	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
1900 	    false)) {
1901 		goto label_error;
1902 	}
1903 
1904 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1905 	if (arena_decay_init(&arena->decay_dirty,
1906 	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
1907 		goto label_error;
1908 	}
1909 	if (arena_decay_init(&arena->decay_muzzy,
1910 	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
1911 		goto label_error;
1912 	}
1913 #else
1914 	if (arena_decay_init(&arena->decay_dirty,
1915 	    arena_dirty_decay_ms_default_get(), NULL)) {
1916 		goto label_error;
1917 	}
1918 	if (arena_decay_init(&arena->decay_muzzy,
1919 	    arena_muzzy_decay_ms_default_get(), NULL)) {
1920 		goto label_error;
1921 	}
1922 #endif
1923 
1924 	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
1925 	arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
1926 	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
1927 	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
1928 		goto label_error;
1929 	}
1930 
1931 	extent_avail_new(&arena->extent_avail);
1932 	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
1933 	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
1934 		goto label_error;
1935 	}
1936 
1937 	/* Initialize bins. */
1938 	for (i = 0; i < NBINS; i++) {
1939 		bool err = bin_init(&arena->bins[i]);
1940 		if (err) {
1941 			goto label_error;
1942 		}
1943 	}
1944 
1945 	arena->base = base;
1946 	/* Set arena before creating background threads. */
1947 	arena_set(ind, arena);
1948 
1949 	nstime_init(&arena->create_time, 0);
1950 	nstime_update(&arena->create_time);
1951 
1952 	/* We don't support reentrancy for arena 0 bootstrapping. */
1953 	if (ind != 0) {
1954 		/*
1955 		 * If we're here, then arena 0 already exists, so bootstrapping
1956 		 * is done enough that we should have tsd.
1957 		 */
1958 		assert(!tsdn_null(tsdn));
1959 		pre_reentrancy(tsdn_tsd(tsdn), arena);
1960 		if (hooks_arena_new_hook) {
1961 			hooks_arena_new_hook();
1962 		}
1963 		post_reentrancy(tsdn_tsd(tsdn));
1964 	}
1965 
1966 	return arena;
1967 label_error:
1968 	if (ind != 0) {
1969 		base_delete(tsdn, base);
1970 	}
1971 	return NULL;
1972 }
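/*
 * Illustrative note on the error path above: arena 0 borrows the global boot
 * base (b0get()) rather than owning one, so only arenas with ind != 0 delete
 * their base_t on failure.  The same asymmetry explains the reentrancy guard
 * near the end: the arena-new hooks only run once arena 0 (and therefore
 * tsd) is known to exist.
 */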
1973 
1974 void
1975 arena_boot(void) {
1976 	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
1977 	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
1978 #define REGIND_bin_yes(index, reg_size) 				\
1979 	div_init(&arena_binind_div_info[(index)], (reg_size));
1980 #define REGIND_bin_no(index, reg_size)
1981 #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
1982     lg_delta_lookup)							\
1983 	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
1984 	SIZE_CLASSES
1985 #undef REGIND_bin_yes
1986 #undef REGIND_bin_no
1987 #undef SC
1988 }
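/*
 * Illustrative note: the SIZE_CLASSES expansion above precomputes one
 * div_info_t per small bin so that a region's index within its slab can
 * later be derived with a multiply-and-shift instead of a hardware division
 * by reg_size (see div_init()/div_compute() in jemalloc/internal/div.h).
 */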
1989 
1990 void
1991 arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
1992 	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
1993 	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
1994 }
1995 
1996 void
1997 arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
1998 	if (config_stats) {
1999 #if defined(ANDROID_ENABLE_TCACHE)
2000 		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
2001 #endif
2002 #ifndef JEMALLOC_ATOMIC_U64
2003 		malloc_mutex_prefork(tsdn, &arena->stats.mtx);
2004 #endif
2005 	}
2006 }
2007 
2008 void
2009 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
2010 	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
2011 }
2012 
2013 void
2014 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
2015 	extents_prefork(tsdn, &arena->extents_dirty);
2016 	extents_prefork(tsdn, &arena->extents_muzzy);
2017 	extents_prefork(tsdn, &arena->extents_retained);
2018 }
2019 
2020 void
2021 arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
2022 	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
2023 }
2024 
2025 void
2026 arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
2027 	base_prefork(tsdn, arena->base);
2028 }
2029 
2030 void
2031 arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
2032 	malloc_mutex_prefork(tsdn, &arena->large_mtx);
2033 }
2034 
2035 void
2036 arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
2037 	for (unsigned i = 0; i < NBINS; i++) {
2038 		bin_prefork(tsdn, &arena->bins[i]);
2039 	}
2040 }
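/*
 * Illustrative note: arena_prefork0..7 are invoked in ascending order for
 * every arena before fork(), acquiring the arena's mutexes in one fixed
 * global order (decay, stats/tcache_ql, extent_grow, extents, extent_avail,
 * base, large, bins).  The postfork handlers below walk the same set in
 * reverse, releasing (parent) or reinitializing (child) the locks opposite
 * to the order of acquisition.
 */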
2041 
2042 void
2043 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
2044 	unsigned i;
2045 
2046 	for (i = 0; i < NBINS; i++) {
2047 		bin_postfork_parent(tsdn, &arena->bins[i]);
2048 	}
2049 	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
2050 	base_postfork_parent(tsdn, arena->base);
2051 	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
2052 	extents_postfork_parent(tsdn, &arena->extents_retained);
2053 	extents_postfork_parent(tsdn, &arena->extents_muzzy);
2054 	extents_postfork_parent(tsdn, &arena->extents_dirty);
2055 	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
2056 	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
2057 	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
2058 	if (config_stats) {
2059 #ifndef JEMALLOC_ATOMIC_U64
2060 		malloc_mutex_postfork_parent(tsdn, &arena->stats.mtx);
2061 #endif
2062 #if defined(ANDROID_ENABLE_TCACHE)
2063 		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
2064 #endif
2065 	}
2066 }
2067 
2068 void
2069 arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
2070 	unsigned i;
2071 
2072 	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
2073 	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
2074 	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
2075 		arena_nthreads_inc(arena, false);
2076 	}
2077 	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
2078 		arena_nthreads_inc(arena, true);
2079 	}
2080 #if defined(ANDROID_ENABLE_TCACHE)
2081 	if (config_stats) {
2082 		ql_new(&arena->tcache_ql);
2083 		ql_new(&arena->cache_bin_array_descriptor_ql);
2084 		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
2085 		if (tcache != NULL && tcache->arena == arena) {
2086 			ql_elm_new(tcache, link);
2087 			ql_tail_insert(&arena->tcache_ql, tcache, link);
2088 			cache_bin_array_descriptor_init(
2089 			    &tcache->cache_bin_array_descriptor,
2090 			    tcache->bins_small, tcache->bins_large);
2091 			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
2092 			    &tcache->cache_bin_array_descriptor, link);
2093 		}
2094 	}
2095 #endif
2096 
2097 	for (i = 0; i < NBINS; i++) {
2098 		bin_postfork_child(tsdn, &arena->bins[i]);
2099 	}
2100 	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
2101 	base_postfork_child(tsdn, arena->base);
2102 	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
2103 	extents_postfork_child(tsdn, &arena->extents_retained);
2104 	extents_postfork_child(tsdn, &arena->extents_muzzy);
2105 	extents_postfork_child(tsdn, &arena->extents_dirty);
2106 	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
2107 	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
2108 	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
2109 	if (config_stats) {
2110 #ifndef JEMALLOC_ATOMIC_U64
2111 		malloc_mutex_postfork_child(tsdn, &arena->stats.mtx);
2112 #endif
2113 #if defined(ANDROID_ENABLE_TCACHE)
2114 		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
2115 #endif
2116 	}
2117 }
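/*
 * Illustrative note: in the child process only the forking thread survives,
 * so the nthreads counters are reset and re-incremented solely for that
 * thread's current arena/iarena bindings, and (when tcaches are enabled) the
 * tcache list is rebuilt containing just that thread's tcache.
 */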
2118