1 #define JEMALLOC_CTL_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/ctl.h"
7 #include "jemalloc/internal/extent_dss.h"
8 #include "jemalloc/internal/extent_mmap.h"
9 #include "jemalloc/internal/mutex.h"
10 #include "jemalloc/internal/nstime.h"
11 #include "jemalloc/internal/size_classes.h"
12 #include "jemalloc/internal/util.h"
13
14 /******************************************************************************/
15 /* Data. */
16
17 /*
18 * ctl_mtx protects the following:
19 * - ctl_stats->*
20 */
21 static malloc_mutex_t ctl_mtx;
22 static bool ctl_initialized;
23 static ctl_stats_t *ctl_stats;
24 static ctl_arenas_t *ctl_arenas;
25
26 /******************************************************************************/
27 /* Helpers for named and indexed nodes. */
28
29 static const ctl_named_node_t *
30 ctl_named_node(const ctl_node_t *node) {
31 return ((node->named) ? (const ctl_named_node_t *)node : NULL);
32 }
33
34 static const ctl_named_node_t *
35 ctl_named_children(const ctl_named_node_t *node, size_t index) {
36 const ctl_named_node_t *children = ctl_named_node(node->children);
37
38 return (children ? &children[index] : NULL);
39 }
40
41 static const ctl_indexed_node_t *
42 ctl_indexed_node(const ctl_node_t *node) {
43 return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
44 }
45
46 /******************************************************************************/
47 /* Function prototypes for non-inline static functions. */
48
49 #define CTL_PROTO(n) \
50 static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
51 void *oldp, size_t *oldlenp, void *newp, size_t newlen);
52
53 #define INDEX_PROTO(n) \
54 static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
55 const size_t *mib, size_t miblen, size_t i);
56
57 CTL_PROTO(version)
58 CTL_PROTO(epoch)
59 CTL_PROTO(background_thread)
60 CTL_PROTO(max_background_threads)
61 CTL_PROTO(thread_tcache_enabled)
62 CTL_PROTO(thread_tcache_flush)
63 CTL_PROTO(thread_prof_name)
64 CTL_PROTO(thread_prof_active)
65 CTL_PROTO(thread_arena)
66 CTL_PROTO(thread_allocated)
67 CTL_PROTO(thread_allocatedp)
68 CTL_PROTO(thread_deallocated)
69 CTL_PROTO(thread_deallocatedp)
70 CTL_PROTO(config_cache_oblivious)
71 CTL_PROTO(config_debug)
72 CTL_PROTO(config_fill)
73 CTL_PROTO(config_lazy_lock)
74 CTL_PROTO(config_malloc_conf)
75 CTL_PROTO(config_prof)
76 CTL_PROTO(config_prof_libgcc)
77 CTL_PROTO(config_prof_libunwind)
78 CTL_PROTO(config_stats)
79 CTL_PROTO(config_utrace)
80 CTL_PROTO(config_xmalloc)
81 CTL_PROTO(opt_abort)
82 CTL_PROTO(opt_abort_conf)
83 CTL_PROTO(opt_metadata_thp)
84 CTL_PROTO(opt_retain)
85 CTL_PROTO(opt_dss)
86 CTL_PROTO(opt_narenas)
87 CTL_PROTO(opt_percpu_arena)
88 CTL_PROTO(opt_background_thread)
89 CTL_PROTO(opt_max_background_threads)
90 CTL_PROTO(opt_dirty_decay_ms)
91 CTL_PROTO(opt_muzzy_decay_ms)
92 CTL_PROTO(opt_stats_print)
93 CTL_PROTO(opt_stats_print_opts)
94 CTL_PROTO(opt_junk)
95 CTL_PROTO(opt_zero)
96 CTL_PROTO(opt_utrace)
97 CTL_PROTO(opt_xmalloc)
98 CTL_PROTO(opt_tcache)
99 CTL_PROTO(opt_thp)
100 CTL_PROTO(opt_lg_extent_max_active_fit)
101 CTL_PROTO(opt_lg_tcache_max)
102 CTL_PROTO(opt_prof)
103 CTL_PROTO(opt_prof_prefix)
104 CTL_PROTO(opt_prof_active)
105 CTL_PROTO(opt_prof_thread_active_init)
106 CTL_PROTO(opt_lg_prof_sample)
107 CTL_PROTO(opt_lg_prof_interval)
108 CTL_PROTO(opt_prof_gdump)
109 CTL_PROTO(opt_prof_final)
110 CTL_PROTO(opt_prof_leak)
111 CTL_PROTO(opt_prof_accum)
112 CTL_PROTO(tcache_create)
113 CTL_PROTO(tcache_flush)
114 CTL_PROTO(tcache_destroy)
115 CTL_PROTO(arena_i_initialized)
116 CTL_PROTO(arena_i_decay)
117 CTL_PROTO(arena_i_purge)
118 CTL_PROTO(arena_i_reset)
119 CTL_PROTO(arena_i_destroy)
120 CTL_PROTO(arena_i_dss)
121 CTL_PROTO(arena_i_dirty_decay_ms)
122 CTL_PROTO(arena_i_muzzy_decay_ms)
123 CTL_PROTO(arena_i_extent_hooks)
124 CTL_PROTO(arena_i_retain_grow_limit)
125 INDEX_PROTO(arena_i)
126 CTL_PROTO(arenas_bin_i_size)
127 CTL_PROTO(arenas_bin_i_nregs)
128 CTL_PROTO(arenas_bin_i_slab_size)
129 INDEX_PROTO(arenas_bin_i)
130 CTL_PROTO(arenas_lextent_i_size)
131 INDEX_PROTO(arenas_lextent_i)
132 CTL_PROTO(arenas_narenas)
133 CTL_PROTO(arenas_dirty_decay_ms)
134 CTL_PROTO(arenas_muzzy_decay_ms)
135 CTL_PROTO(arenas_quantum)
136 CTL_PROTO(arenas_page)
137 CTL_PROTO(arenas_tcache_max)
138 CTL_PROTO(arenas_nbins)
139 CTL_PROTO(arenas_nhbins)
140 CTL_PROTO(arenas_nlextents)
141 CTL_PROTO(arenas_create)
142 CTL_PROTO(arenas_lookup)
143 CTL_PROTO(prof_thread_active_init)
144 CTL_PROTO(prof_active)
145 CTL_PROTO(prof_dump)
146 CTL_PROTO(prof_gdump)
147 CTL_PROTO(prof_reset)
148 CTL_PROTO(prof_interval)
149 CTL_PROTO(lg_prof_sample)
150 CTL_PROTO(stats_arenas_i_small_allocated)
151 CTL_PROTO(stats_arenas_i_small_nmalloc)
152 CTL_PROTO(stats_arenas_i_small_ndalloc)
153 CTL_PROTO(stats_arenas_i_small_nrequests)
154 #if !defined(ANDROID_MINIMIZE_STRUCTS)
155 CTL_PROTO(stats_arenas_i_large_allocated)
156 CTL_PROTO(stats_arenas_i_large_nmalloc)
157 CTL_PROTO(stats_arenas_i_large_ndalloc)
158 #endif
159 CTL_PROTO(stats_arenas_i_large_nrequests)
160 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
161 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
162 #if !defined(ANDROID_MINIMIZE_STRUCTS)
163 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
164 #endif
165 CTL_PROTO(stats_arenas_i_bins_j_curregs)
166 #if !defined(ANDROID_MINIMIZE_STRUCTS)
167 CTL_PROTO(stats_arenas_i_bins_j_nfills)
168 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
169 CTL_PROTO(stats_arenas_i_bins_j_nslabs)
170 CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
171 CTL_PROTO(stats_arenas_i_bins_j_curslabs)
172 #endif
173 INDEX_PROTO(stats_arenas_i_bins_j)
174 CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
175 CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
176 #if !defined(ANDROID_MINIMIZE_STRUCTS)
177 CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
178 CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
179 #endif
180 INDEX_PROTO(stats_arenas_i_lextents_j)
181 CTL_PROTO(stats_arenas_i_nthreads)
182 CTL_PROTO(stats_arenas_i_uptime)
183 CTL_PROTO(stats_arenas_i_dss)
184 CTL_PROTO(stats_arenas_i_dirty_decay_ms)
185 CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
186 CTL_PROTO(stats_arenas_i_pactive)
187 CTL_PROTO(stats_arenas_i_pdirty)
188 CTL_PROTO(stats_arenas_i_pmuzzy)
189 CTL_PROTO(stats_arenas_i_mapped)
190 #if !defined(ANDROID_MINIMIZE_STRUCTS)
191 CTL_PROTO(stats_arenas_i_retained)
192 CTL_PROTO(stats_arenas_i_dirty_npurge)
193 CTL_PROTO(stats_arenas_i_dirty_nmadvise)
194 CTL_PROTO(stats_arenas_i_dirty_purged)
195 CTL_PROTO(stats_arenas_i_muzzy_npurge)
196 CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
197 CTL_PROTO(stats_arenas_i_muzzy_purged)
198 CTL_PROTO(stats_arenas_i_base)
199 CTL_PROTO(stats_arenas_i_internal)
200 CTL_PROTO(stats_arenas_i_metadata_thp)
201 CTL_PROTO(stats_arenas_i_tcache_bytes)
202 CTL_PROTO(stats_arenas_i_resident)
203 #endif
204 INDEX_PROTO(stats_arenas_i)
205 CTL_PROTO(stats_allocated)
206 CTL_PROTO(stats_active)
207 CTL_PROTO(stats_background_thread_num_threads)
208 CTL_PROTO(stats_background_thread_num_runs)
209 CTL_PROTO(stats_background_thread_run_interval)
210 CTL_PROTO(stats_metadata)
211 CTL_PROTO(stats_metadata_thp)
212 CTL_PROTO(stats_resident)
213 CTL_PROTO(stats_mapped)
214 CTL_PROTO(stats_retained)
215
216 #define MUTEX_STATS_CTL_PROTO_GEN(n) \
217 CTL_PROTO(stats_##n##_num_ops) \
218 CTL_PROTO(stats_##n##_num_wait) \
219 CTL_PROTO(stats_##n##_num_spin_acq) \
220 CTL_PROTO(stats_##n##_num_owner_switch) \
221 CTL_PROTO(stats_##n##_total_wait_time) \
222 CTL_PROTO(stats_##n##_max_wait_time) \
223 CTL_PROTO(stats_##n##_max_num_thds)
224
225 #if !defined(ANDROID_MINIMIZE_STRUCTS)
226 /* Global mutexes. */
227 #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
228 MUTEX_PROF_GLOBAL_MUTEXES
229 #undef OP
230 #endif
231
232 #if !defined(ANDROID_MINIMIZE_STRUCTS)
233 /* Per arena mutexes. */
234 #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
235 MUTEX_PROF_ARENA_MUTEXES
236 #undef OP
237 #endif
238
239 /* Arena bin mutexes. */
240 MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
241 #undef MUTEX_STATS_CTL_PROTO_GEN
242
243 CTL_PROTO(stats_mutexes_reset)
244
245 /******************************************************************************/
246 /* mallctl tree. */
247
248 #define NAME(n) {true}, n
249 #define CHILD(t, c) \
250 sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
251 (ctl_node_t *)c##_node, \
252 NULL
253 #define CTL(c) 0, NULL, c##_ctl
254
255 /*
256 * Only handles internal indexed nodes, since there are currently no external
257 * ones.
258 */
259 #define INDEX(i) {false}, i##_index
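/*
 * The macros above expand to positional initializers for ctl_named_node_t and
 * ctl_indexed_node_t: NAME() supplies the {named} discriminator plus the
 * element name, CHILD() supplies the child count and child array (with a NULL
 * handler), CTL() supplies a leaf handler with no children, and INDEX()
 * builds an indexed node whose children are resolved at lookup time by
 * i##_index().
 */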
260
261 static const ctl_named_node_t thread_tcache_node[] = {
262 {NAME("enabled"), CTL(thread_tcache_enabled)},
263 {NAME("flush"), CTL(thread_tcache_flush)}
264 };
265
266 static const ctl_named_node_t thread_prof_node[] = {
267 {NAME("name"), CTL(thread_prof_name)},
268 {NAME("active"), CTL(thread_prof_active)}
269 };
270
271 static const ctl_named_node_t thread_node[] = {
272 {NAME("arena"), CTL(thread_arena)},
273 {NAME("allocated"), CTL(thread_allocated)},
274 {NAME("allocatedp"), CTL(thread_allocatedp)},
275 {NAME("deallocated"), CTL(thread_deallocated)},
276 {NAME("deallocatedp"), CTL(thread_deallocatedp)},
277 {NAME("tcache"), CHILD(named, thread_tcache)},
278 {NAME("prof"), CHILD(named, thread_prof)}
279 };
280
281 static const ctl_named_node_t config_node[] = {
282 {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
283 {NAME("debug"), CTL(config_debug)},
284 {NAME("fill"), CTL(config_fill)},
285 {NAME("lazy_lock"), CTL(config_lazy_lock)},
286 {NAME("malloc_conf"), CTL(config_malloc_conf)},
287 {NAME("prof"), CTL(config_prof)},
288 {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
289 {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
290 {NAME("stats"), CTL(config_stats)},
291 {NAME("utrace"), CTL(config_utrace)},
292 {NAME("xmalloc"), CTL(config_xmalloc)}
293 };
294
295 static const ctl_named_node_t opt_node[] = {
296 {NAME("abort"), CTL(opt_abort)},
297 {NAME("abort_conf"), CTL(opt_abort_conf)},
298 {NAME("metadata_thp"), CTL(opt_metadata_thp)},
299 {NAME("retain"), CTL(opt_retain)},
300 {NAME("dss"), CTL(opt_dss)},
301 {NAME("narenas"), CTL(opt_narenas)},
302 {NAME("percpu_arena"), CTL(opt_percpu_arena)},
303 {NAME("background_thread"), CTL(opt_background_thread)},
304 {NAME("max_background_threads"), CTL(opt_max_background_threads)},
305 {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
306 {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
307 {NAME("stats_print"), CTL(opt_stats_print)},
308 {NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
309 {NAME("junk"), CTL(opt_junk)},
310 {NAME("zero"), CTL(opt_zero)},
311 {NAME("utrace"), CTL(opt_utrace)},
312 {NAME("xmalloc"), CTL(opt_xmalloc)},
313 {NAME("tcache"), CTL(opt_tcache)},
314 {NAME("thp"), CTL(opt_thp)},
315 {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
316 {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
317 {NAME("prof"), CTL(opt_prof)},
318 {NAME("prof_prefix"), CTL(opt_prof_prefix)},
319 {NAME("prof_active"), CTL(opt_prof_active)},
320 {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
321 {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
322 {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
323 {NAME("prof_gdump"), CTL(opt_prof_gdump)},
324 {NAME("prof_final"), CTL(opt_prof_final)},
325 {NAME("prof_leak"), CTL(opt_prof_leak)},
326 {NAME("prof_accum"), CTL(opt_prof_accum)}
327 };
328
329 static const ctl_named_node_t tcache_node[] = {
330 {NAME("create"), CTL(tcache_create)},
331 {NAME("flush"), CTL(tcache_flush)},
332 {NAME("destroy"), CTL(tcache_destroy)}
333 };
334
335 static const ctl_named_node_t arena_i_node[] = {
336 {NAME("initialized"), CTL(arena_i_initialized)},
337 {NAME("decay"), CTL(arena_i_decay)},
338 {NAME("purge"), CTL(arena_i_purge)},
339 {NAME("reset"), CTL(arena_i_reset)},
340 {NAME("destroy"), CTL(arena_i_destroy)},
341 {NAME("dss"), CTL(arena_i_dss)},
342 {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
343 {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
344 {NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
345 {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
346 };
347 static const ctl_named_node_t super_arena_i_node[] = {
348 {NAME(""), CHILD(named, arena_i)}
349 };
350
351 static const ctl_indexed_node_t arena_node[] = {
352 {INDEX(arena_i)}
353 };
354
355 static const ctl_named_node_t arenas_bin_i_node[] = {
356 {NAME("size"), CTL(arenas_bin_i_size)},
357 {NAME("nregs"), CTL(arenas_bin_i_nregs)},
358 {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
359 };
360 static const ctl_named_node_t super_arenas_bin_i_node[] = {
361 {NAME(""), CHILD(named, arenas_bin_i)}
362 };
363
364 static const ctl_indexed_node_t arenas_bin_node[] = {
365 {INDEX(arenas_bin_i)}
366 };
367
368 static const ctl_named_node_t arenas_lextent_i_node[] = {
369 {NAME("size"), CTL(arenas_lextent_i_size)}
370 };
371 static const ctl_named_node_t super_arenas_lextent_i_node[] = {
372 {NAME(""), CHILD(named, arenas_lextent_i)}
373 };
374
375 static const ctl_indexed_node_t arenas_lextent_node[] = {
376 {INDEX(arenas_lextent_i)}
377 };
378
379 static const ctl_named_node_t arenas_node[] = {
380 {NAME("narenas"), CTL(arenas_narenas)},
381 {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
382 {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
383 {NAME("quantum"), CTL(arenas_quantum)},
384 {NAME("page"), CTL(arenas_page)},
385 {NAME("tcache_max"), CTL(arenas_tcache_max)},
386 {NAME("nbins"), CTL(arenas_nbins)},
387 {NAME("nhbins"), CTL(arenas_nhbins)},
388 {NAME("bin"), CHILD(indexed, arenas_bin)},
389 {NAME("nlextents"), CTL(arenas_nlextents)},
390 {NAME("lextent"), CHILD(indexed, arenas_lextent)},
391 {NAME("create"), CTL(arenas_create)},
392 {NAME("lookup"), CTL(arenas_lookup)}
393 };
394
395 static const ctl_named_node_t prof_node[] = {
396 {NAME("thread_active_init"), CTL(prof_thread_active_init)},
397 {NAME("active"), CTL(prof_active)},
398 {NAME("dump"), CTL(prof_dump)},
399 {NAME("gdump"), CTL(prof_gdump)},
400 {NAME("reset"), CTL(prof_reset)},
401 {NAME("interval"), CTL(prof_interval)},
402 {NAME("lg_sample"), CTL(lg_prof_sample)}
403 };
404
405 static const ctl_named_node_t stats_arenas_i_small_node[] = {
406 {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
407 {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
408 {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
409 #if !defined(ANDROID_MINIMIZE_STRUCTS)
410 {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
411 #endif
412 };
413
414 static const ctl_named_node_t stats_arenas_i_large_node[] = {
415 #if !defined(ANDROID_MINIMIZE_STRUCTS)
416 {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
417 {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
418 {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
419 {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
420 #endif
421 };
422
423 #define MUTEX_PROF_DATA_NODE(prefix) \
424 static const ctl_named_node_t stats_##prefix##_node[] = { \
425 {NAME("num_ops"), \
426 CTL(stats_##prefix##_num_ops)}, \
427 {NAME("num_wait"), \
428 CTL(stats_##prefix##_num_wait)}, \
429 {NAME("num_spin_acq"), \
430 CTL(stats_##prefix##_num_spin_acq)}, \
431 {NAME("num_owner_switch"), \
432 CTL(stats_##prefix##_num_owner_switch)}, \
433 {NAME("total_wait_time"), \
434 CTL(stats_##prefix##_total_wait_time)}, \
435 {NAME("max_wait_time"), \
436 CTL(stats_##prefix##_max_wait_time)}, \
437 {NAME("max_num_thds"), \
438 CTL(stats_##prefix##_max_num_thds)} \
439 /* Note that the number of currently waiting threads is not provided. */ \
440 };
441
442 MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
443
444 static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
445 {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
446 {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
447 #if !defined(ANDROID_MINIMIZE_STRUCTS)
448 {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
449 #endif
450 {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
451 #if !defined(ANDROID_MINIMIZE_STRUCTS)
452 {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
453 {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
454 {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
455 {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
456 {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
457 #endif
458 {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
459 };
460
461 static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
462 {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
463 };
464
465 static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
466 {INDEX(stats_arenas_i_bins_j)}
467 };
468
469 static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
470 {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
471 {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
472 #if !defined(ANDROID_MINIMIZE_STRUCTS)
473 {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
474 {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
475 #endif
476 };
477 static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
478 {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
479 };
480
481 static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
482 {INDEX(stats_arenas_i_lextents_j)}
483 };
484
485 #if !defined(ANDROID_MINIMIZE_STRUCTS)
486 #define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
487 MUTEX_PROF_ARENA_MUTEXES
488 #undef OP
489 #endif
490
491 #if !defined(ANDROID_MINIMIZE_STRUCTS)
492 static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
493 #define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
494 MUTEX_PROF_ARENA_MUTEXES
495 #undef OP
496 };
497 #endif
498
499 static const ctl_named_node_t stats_arenas_i_node[] = {
500 {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
501 {NAME("uptime"), CTL(stats_arenas_i_uptime)},
502 {NAME("dss"), CTL(stats_arenas_i_dss)},
503 {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
504 {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
505 {NAME("pactive"), CTL(stats_arenas_i_pactive)},
506 {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
507 {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
508 {NAME("mapped"), CTL(stats_arenas_i_mapped)},
509 #if !defined(ANDROID_MINIMIZE_STRUCTS)
510 {NAME("retained"), CTL(stats_arenas_i_retained)},
511 {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
512 {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
513 {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
514 {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
515 {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
516 {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
517 {NAME("base"), CTL(stats_arenas_i_base)},
518 {NAME("internal"), CTL(stats_arenas_i_internal)},
519 {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
520 {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
521 {NAME("resident"), CTL(stats_arenas_i_resident)},
522 #endif
523 {NAME("small"), CHILD(named, stats_arenas_i_small)},
524 {NAME("large"), CHILD(named, stats_arenas_i_large)},
525 {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
526 {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
527 #if !defined(ANDROID_MINIMIZE_STRUCTS)
528 {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
529 #endif
530 };
531 static const ctl_named_node_t super_stats_arenas_i_node[] = {
532 {NAME(""), CHILD(named, stats_arenas_i)}
533 };
534
535 static const ctl_indexed_node_t stats_arenas_node[] = {
536 {INDEX(stats_arenas_i)}
537 };
538
539 static const ctl_named_node_t stats_background_thread_node[] = {
540 {NAME("num_threads"), CTL(stats_background_thread_num_threads)},
541 {NAME("num_runs"), CTL(stats_background_thread_num_runs)},
542 {NAME("run_interval"), CTL(stats_background_thread_run_interval)}
543 };
544
545 #if !defined(ANDROID_MINIMIZE_STRUCTS)
546 #define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
547 MUTEX_PROF_GLOBAL_MUTEXES
548 #undef OP
549 #endif
550
551 #if !defined(ANDROID_MINIMIZE_STRUCTS)
552 static const ctl_named_node_t stats_mutexes_node[] = {
553 #define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
554 MUTEX_PROF_GLOBAL_MUTEXES
555 #undef OP
556 {NAME("reset"), CTL(stats_mutexes_reset)}
557 };
558 #undef MUTEX_PROF_DATA_NODE
559 #endif
560
561 static const ctl_named_node_t stats_node[] = {
562 {NAME("allocated"), CTL(stats_allocated)},
563 {NAME("active"), CTL(stats_active)},
564 {NAME("metadata"), CTL(stats_metadata)},
565 {NAME("metadata_thp"), CTL(stats_metadata_thp)},
566 {NAME("resident"), CTL(stats_resident)},
567 {NAME("mapped"), CTL(stats_mapped)},
568 {NAME("retained"), CTL(stats_retained)},
569 {NAME("background_thread"),
570 CHILD(named, stats_background_thread)},
571 #if !defined(ANDROID_MINIMIZE_STRUCTS)
572 {NAME("mutexes"), CHILD(named, stats_mutexes)},
573 #endif
574 {NAME("arenas"), CHILD(indexed, stats_arenas)}
575 };
576
577 static const ctl_named_node_t root_node[] = {
578 {NAME("version"), CTL(version)},
579 {NAME("epoch"), CTL(epoch)},
580 {NAME("background_thread"), CTL(background_thread)},
581 {NAME("max_background_threads"), CTL(max_background_threads)},
582 {NAME("thread"), CHILD(named, thread)},
583 {NAME("config"), CHILD(named, config)},
584 {NAME("opt"), CHILD(named, opt)},
585 {NAME("tcache"), CHILD(named, tcache)},
586 {NAME("arena"), CHILD(indexed, arena)},
587 {NAME("arenas"), CHILD(named, arenas)},
588 {NAME("prof"), CHILD(named, prof)},
589 {NAME("stats"), CHILD(named, stats)}
590 };
591 static const ctl_named_node_t super_root_node[] = {
592 {NAME(""), CHILD(named, root)}
593 };
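/*
 * Worked example of the tree layout above: the name "arenas.bin.0.size"
 * resolves as super_root_node -> root_node["arenas"] -> arenas_node["bin"]
 * (indexed; arenas_bin_i_index() returns super_arenas_bin_i_node) ->
 * arenas_bin_i_node["size"], whose handler is arenas_bin_i_size_ctl().  With
 * this table layout the corresponding MIB is {9, 8, 0, 0}.
 */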
594
595 #undef NAME
596 #undef CHILD
597 #undef CTL
598 #undef INDEX
599
600 /******************************************************************************/
601
602 /*
603  * Sets *dst to *dst + *src non-atomically. This is safe, since everything is
604 * synchronized by the ctl mutex.
605 */
606 static void
607 ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
608 #ifdef JEMALLOC_ATOMIC_U64
609 uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
610 uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
611 atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
612 #else
613 *dst += *src;
614 #endif
615 }
616
617 /* Likewise: with ctl mutex synchronization, reading is simple. */
618 static uint64_t
619 ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
620 #ifdef JEMALLOC_ATOMIC_U64
621 return atomic_load_u64(p, ATOMIC_RELAXED);
622 #else
623 return *p;
624 #endif
625 }
626
627 static void
628 accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
629 size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
630 size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
631 atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
632 }
633
634 /******************************************************************************/
635
636 static unsigned
637 arenas_i2a_impl(size_t i, bool compat, bool validate) {
638 unsigned a;
639
640 switch (i) {
641 case MALLCTL_ARENAS_ALL:
642 a = 0;
643 break;
644 case MALLCTL_ARENAS_DESTROYED:
645 a = 1;
646 break;
647 default:
648 if (compat && i == ctl_arenas->narenas) {
649 /*
650 * Provide deprecated backward compatibility for
651 * accessing the merged stats at index narenas rather
652 * than via MALLCTL_ARENAS_ALL. This is scheduled for
653 * removal in 6.0.0.
654 */
655 a = 0;
656 } else if (validate && i >= ctl_arenas->narenas) {
657 a = UINT_MAX;
658 } else {
659 /*
660 * This function should never be called for an index
661 * more than one past the range of indices that have
662 * initialized ctl data.
663 */
664 assert(i < ctl_arenas->narenas || (!validate && i ==
665 ctl_arenas->narenas));
666 a = (unsigned)i + 2;
667 }
668 break;
669 }
670
671 return a;
672 }
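/*
 * Resulting ctl_arenas->arenas[] layout: slot 0 holds the merged
 * (MALLCTL_ARENAS_ALL) stats, slot 1 holds MALLCTL_ARENAS_DESTROYED, and real
 * arena i lives in slot i + 2.
 */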
673
674 static unsigned
675 arenas_i2a(size_t i) {
676 return arenas_i2a_impl(i, true, false);
677 }
678
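/*
 * arenas_i_impl(): "compat" additionally accepts the deprecated index narenas
 * as an alias for the merged stats (see arenas_i2a_impl()), and "init"
 * allocates the ctl_arena_t (plus its ctl_arena_stats_t when config_stats) on
 * first use; callers that must not allocate pass init=false.
 */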
679 static ctl_arena_t *
680 arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
681 ctl_arena_t *ret;
682
683 assert(!compat || !init);
684
685 ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
686 if (init && ret == NULL) {
687 if (config_stats) {
688 struct container_s {
689 ctl_arena_t ctl_arena;
690 ctl_arena_stats_t astats;
691 };
692 struct container_s *cont =
693 (struct container_s *)base_alloc(tsd_tsdn(tsd),
694 b0get(), sizeof(struct container_s), QUANTUM);
695 if (cont == NULL) {
696 return NULL;
697 }
698 ret = &cont->ctl_arena;
699 ret->astats = &cont->astats;
700 } else {
701 ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
702 sizeof(ctl_arena_t), QUANTUM);
703 if (ret == NULL) {
704 return NULL;
705 }
706 }
707 ret->arena_ind = (unsigned)i;
708 ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
709 }
710
711 assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
712 return ret;
713 }
714
715 static ctl_arena_t *
716 arenas_i(size_t i) {
717 ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
718 assert(ret != NULL);
719 return ret;
720 }
721
722 static void
723 ctl_arena_clear(ctl_arena_t *ctl_arena) {
724 ctl_arena->nthreads = 0;
725 ctl_arena->dss = dss_prec_names[dss_prec_limit];
726 ctl_arena->dirty_decay_ms = -1;
727 ctl_arena->muzzy_decay_ms = -1;
728 ctl_arena->pactive = 0;
729 ctl_arena->pdirty = 0;
730 ctl_arena->pmuzzy = 0;
731 if (config_stats) {
732 memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
733 ctl_arena->astats->allocated_small = 0;
734 ctl_arena->astats->nmalloc_small = 0;
735 ctl_arena->astats->ndalloc_small = 0;
736 ctl_arena->astats->nrequests_small = 0;
737 memset(ctl_arena->astats->bstats, 0, NBINS *
738 sizeof(bin_stats_t));
739 memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
740 sizeof(arena_stats_large_t));
741 }
742 }
743
744 static void
745 ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
746 unsigned i;
747
748 if (config_stats) {
749 arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
750 &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
751 &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
752 &ctl_arena->pdirty, &ctl_arena->pmuzzy,
753 &ctl_arena->astats->astats, ctl_arena->astats->bstats,
754 ctl_arena->astats->lstats);
755
756 for (i = 0; i < NBINS; i++) {
757 ctl_arena->astats->allocated_small +=
758 ctl_arena->astats->bstats[i].curregs *
759 sz_index2size(i);
760 ctl_arena->astats->nmalloc_small +=
761 ctl_arena->astats->bstats[i].nmalloc;
762 ctl_arena->astats->ndalloc_small +=
763 ctl_arena->astats->bstats[i].ndalloc;
764 #if !defined(ANDROID_MINIMIZE_STRUCTS)
765 ctl_arena->astats->nrequests_small +=
766 ctl_arena->astats->bstats[i].nrequests;
767 #endif
768 }
769 } else {
770 arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
771 &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
772 &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
773 &ctl_arena->pdirty, &ctl_arena->pmuzzy);
774 }
775 }
776
777 static void
778 ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
779 bool destroyed) {
780 unsigned i;
781
782 if (!destroyed) {
783 ctl_sdarena->nthreads += ctl_arena->nthreads;
784 ctl_sdarena->pactive += ctl_arena->pactive;
785 ctl_sdarena->pdirty += ctl_arena->pdirty;
786 ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
787 } else {
788 assert(ctl_arena->nthreads == 0);
789 assert(ctl_arena->pactive == 0);
790 assert(ctl_arena->pdirty == 0);
791 assert(ctl_arena->pmuzzy == 0);
792 }
793
794 if (config_stats) {
795 ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
796 ctl_arena_stats_t *astats = ctl_arena->astats;
797
798 if (!destroyed) {
799 accum_atomic_zu(&sdstats->astats.mapped,
800 &astats->astats.mapped);
801 #if !defined(ANDROID_MINIMIZE_STRUCTS)
802 accum_atomic_zu(&sdstats->astats.retained,
803 &astats->astats.retained);
804 #endif
805 }
806
807 #if !defined(ANDROID_MINIMIZE_STRUCTS)
808 ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
809 &astats->astats.decay_dirty.npurge);
810 ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
811 &astats->astats.decay_dirty.nmadvise);
812 ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
813 &astats->astats.decay_dirty.purged);
814
815 ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
816 &astats->astats.decay_muzzy.npurge);
817 ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
818 &astats->astats.decay_muzzy.nmadvise);
819 ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
820 &astats->astats.decay_muzzy.purged);
821 #endif
822
823 #if !defined(ANDROID_MINIMIZE_STRUCTS)
824 #define OP(mtx) malloc_mutex_prof_merge( \
825 &(sdstats->astats.mutex_prof_data[ \
826 arena_prof_mutex_##mtx]), \
827 &(astats->astats.mutex_prof_data[ \
828 arena_prof_mutex_##mtx]));
829 MUTEX_PROF_ARENA_MUTEXES
830 #undef OP
831 if (!destroyed) {
832 accum_atomic_zu(&sdstats->astats.base,
833 &astats->astats.base);
834 accum_atomic_zu(&sdstats->astats.internal,
835 &astats->astats.internal);
836 accum_atomic_zu(&sdstats->astats.resident,
837 &astats->astats.resident);
838 accum_atomic_zu(&sdstats->astats.metadata_thp,
839 &astats->astats.metadata_thp);
840 } else {
841 assert(atomic_load_zu(
842 &astats->astats.internal, ATOMIC_RELAXED) == 0);
843 }
844 #endif
845
846 if (!destroyed) {
847 sdstats->allocated_small += astats->allocated_small;
848 } else {
849 assert(astats->allocated_small == 0);
850 }
851 sdstats->nmalloc_small += astats->nmalloc_small;
852 sdstats->ndalloc_small += astats->ndalloc_small;
853 sdstats->nrequests_small += astats->nrequests_small;
854
855 #if !defined(ANDROID_MINIMIZE_STRUCTS)
856 if (!destroyed) {
857 accum_atomic_zu(&sdstats->astats.allocated_large,
858 &astats->astats.allocated_large);
859 } else {
860 assert(atomic_load_zu(&astats->astats.allocated_large,
861 ATOMIC_RELAXED) == 0);
862 }
863 ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
864 &astats->astats.nmalloc_large);
865 ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
866 &astats->astats.ndalloc_large);
867 ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
868 &astats->astats.nrequests_large);
869
870 accum_atomic_zu(&sdstats->astats.tcache_bytes,
871 &astats->astats.tcache_bytes);
872 #endif
873
874 if (ctl_arena->arena_ind == 0) {
875 sdstats->astats.uptime = astats->astats.uptime;
876 }
877
878 for (i = 0; i < NBINS; i++) {
879 sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
880 sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
881 #if !defined(ANDROID_MINIMIZE_STRUCTS)
882 sdstats->bstats[i].nrequests +=
883 astats->bstats[i].nrequests;
884 #endif
885 if (!destroyed) {
886 sdstats->bstats[i].curregs +=
887 astats->bstats[i].curregs;
888 } else {
889 assert(astats->bstats[i].curregs == 0);
890 }
891 #if !defined(ANDROID_MINIMIZE_STRUCTS)
892 sdstats->bstats[i].nfills += astats->bstats[i].nfills;
893 sdstats->bstats[i].nflushes +=
894 astats->bstats[i].nflushes;
895 sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
896 sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
897 if (!destroyed) {
898 sdstats->bstats[i].curslabs +=
899 astats->bstats[i].curslabs;
900 } else {
901 assert(astats->bstats[i].curslabs == 0);
902 }
903 #endif
904 malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
905 &astats->bstats[i].mutex_data);
906 }
907
908 for (i = 0; i < NSIZES - NBINS; i++) {
909 ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
910 &astats->lstats[i].nmalloc);
911 ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
912 &astats->lstats[i].ndalloc);
913 #if !defined(ANDROID_MINIMIZE_STRUCTS)
914 ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
915 &astats->lstats[i].nrequests);
916 if (!destroyed) {
917 sdstats->lstats[i].curlextents +=
918 astats->lstats[i].curlextents;
919 } else {
920 assert(astats->lstats[i].curlextents == 0);
921 }
922 #endif
923 }
924 }
925 }
926
927 static void
928 ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
929 unsigned i, bool destroyed) {
930 ctl_arena_t *ctl_arena = arenas_i(i);
931
932 ctl_arena_clear(ctl_arena);
933 ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
934 /* Merge into sum stats as well. */
935 ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
936 }
937
938 static unsigned
939 ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
940 unsigned arena_ind;
941 ctl_arena_t *ctl_arena;
942
943 if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
944 NULL) {
945 ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
946 arena_ind = ctl_arena->arena_ind;
947 } else {
948 arena_ind = ctl_arenas->narenas;
949 }
950
951 /* Trigger stats allocation. */
952 if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
953 return UINT_MAX;
954 }
955
956 /* Initialize new arena. */
957 if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
958 return UINT_MAX;
959 }
960
961 if (arena_ind == ctl_arenas->narenas) {
962 ctl_arenas->narenas++;
963 }
964
965 return arena_ind;
966 }
967
968 static void
969 ctl_background_thread_stats_read(tsdn_t *tsdn) {
970 background_thread_stats_t *stats = &ctl_stats->background_thread;
971 if (!have_background_thread ||
972 background_thread_stats_read(tsdn, stats)) {
973 memset(stats, 0, sizeof(background_thread_stats_t));
974 nstime_init(&stats->run_interval, 0);
975 }
976 }
977
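/*
 * Rebuild the cached stats snapshot: clear the merged ("ALL") arena, re-merge
 * every initialized arena into it, recompute the global totals and mutex
 * profiling data, and bump the epoch.  The caller must already hold ctl_mtx.
 */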
978 static void
979 ctl_refresh(tsdn_t *tsdn) {
980 unsigned i;
981 ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
982 VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
983
984 /*
985 	 * Clear sum stats, since ctl_arena_refresh() will merge the
986 	 * per-arena stats into them.
987 */
988 ctl_arena_clear(ctl_sarena);
989
990 for (i = 0; i < ctl_arenas->narenas; i++) {
991 tarenas[i] = arena_get(tsdn, i, false);
992 }
993
994 for (i = 0; i < ctl_arenas->narenas; i++) {
995 ctl_arena_t *ctl_arena = arenas_i(i);
996 bool initialized = (tarenas[i] != NULL);
997
998 ctl_arena->initialized = initialized;
999 if (initialized) {
1000 ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
1001 false);
1002 }
1003 }
1004
1005 if (config_stats) {
1006 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1007 ctl_stats->allocated = ctl_sarena->astats->allocated_small +
1008 atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
1009 ATOMIC_RELAXED);
1010 ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
1011 ctl_stats->metadata = atomic_load_zu(
1012 &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
1013 atomic_load_zu(&ctl_sarena->astats->astats.internal,
1014 ATOMIC_RELAXED);
1015 ctl_stats->metadata_thp = atomic_load_zu(
1016 &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
1017 ctl_stats->resident = atomic_load_zu(
1018 &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
1019 #endif
1020 ctl_stats->mapped = atomic_load_zu(
1021 &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
1022 #if !defined(ANDROID_MINIMIZE_STRUCTS)
1023 ctl_stats->retained = atomic_load_zu(
1024 &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
1025 #endif
1026
1027 ctl_background_thread_stats_read(tsdn);
1028
1029 #define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
1030 malloc_mutex_lock(tsdn, &mtx); \
1031 malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
1032 malloc_mutex_unlock(tsdn, &mtx);
1033
1034 if (config_prof && opt_prof) {
1035 READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
1036 bt2gctx_mtx);
1037 }
1038 if (have_background_thread) {
1039 READ_GLOBAL_MUTEX_PROF_DATA(
1040 global_prof_mutex_background_thread,
1041 background_thread_lock);
1042 } else {
1043 memset(&ctl_stats->mutex_prof_data[
1044 global_prof_mutex_background_thread], 0,
1045 sizeof(mutex_prof_data_t));
1046 }
1047 		/* We already hold the ctl mutex. */
1048 malloc_mutex_prof_read(tsdn,
1049 &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
1050 &ctl_mtx);
1051 #undef READ_GLOBAL_MUTEX_PROF_DATA
1052 }
1053 ctl_arenas->epoch++;
1054 }
1055
1056 static bool
1057 ctl_init(tsd_t *tsd) {
1058 bool ret;
1059 tsdn_t *tsdn = tsd_tsdn(tsd);
1060
1061 malloc_mutex_lock(tsdn, &ctl_mtx);
1062 if (!ctl_initialized) {
1063 ctl_arena_t *ctl_sarena, *ctl_darena;
1064 unsigned i;
1065
1066 /*
1067 * Allocate demand-zeroed space for pointers to the full
1068 * range of supported arena indices.
1069 */
1070 if (ctl_arenas == NULL) {
1071 ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
1072 b0get(), sizeof(ctl_arenas_t), QUANTUM);
1073 if (ctl_arenas == NULL) {
1074 ret = true;
1075 goto label_return;
1076 }
1077 }
1078
1079 if (config_stats && ctl_stats == NULL) {
1080 ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
1081 sizeof(ctl_stats_t), QUANTUM);
1082 if (ctl_stats == NULL) {
1083 ret = true;
1084 goto label_return;
1085 }
1086 }
1087
1088 /*
1089 * Allocate space for the current full range of arenas
1090 * here rather than doing it lazily elsewhere, in order
1091 * to limit when OOM-caused errors can occur.
1092 */
1093 if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
1094 true)) == NULL) {
1095 ret = true;
1096 goto label_return;
1097 }
1098 ctl_sarena->initialized = true;
1099
1100 if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
1101 false, true)) == NULL) {
1102 ret = true;
1103 goto label_return;
1104 }
1105 ctl_arena_clear(ctl_darena);
1106 /*
1107 * Don't toggle ctl_darena to initialized until an arena is
1108 * actually destroyed, so that arena.<i>.initialized can be used
1109 * to query whether the stats are relevant.
1110 */
1111
1112 ctl_arenas->narenas = narenas_total_get();
1113 for (i = 0; i < ctl_arenas->narenas; i++) {
1114 if (arenas_i_impl(tsd, i, false, true) == NULL) {
1115 ret = true;
1116 goto label_return;
1117 }
1118 }
1119
1120 ql_new(&ctl_arenas->destroyed);
1121 ctl_refresh(tsdn);
1122
1123 ctl_initialized = true;
1124 }
1125
1126 ret = false;
1127 label_return:
1128 malloc_mutex_unlock(tsdn, &ctl_mtx);
1129 return ret;
1130 }
1131
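/*
 * Translate a dotted name (e.g. "opt.junk") into a path through the ctl tree.
 * On success mibp[] holds one index per traversed level, nodesp (if non-NULL)
 * records the visited nodes, and *depthp is reduced to the traversal depth
 * once a terminal node is reached; unmatched components yield ENOENT.
 */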
1132 static int
1133 ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
1134 size_t *mibp, size_t *depthp) {
1135 int ret;
1136 const char *elm, *tdot, *dot;
1137 size_t elen, i, j;
1138 const ctl_named_node_t *node;
1139
1140 elm = name;
1141 /* Equivalent to strchrnul(). */
1142 dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
1143 elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
1144 if (elen == 0) {
1145 ret = ENOENT;
1146 goto label_return;
1147 }
1148 node = super_root_node;
1149 for (i = 0; i < *depthp; i++) {
1150 assert(node);
1151 assert(node->nchildren > 0);
1152 if (ctl_named_node(node->children) != NULL) {
1153 const ctl_named_node_t *pnode = node;
1154
1155 /* Children are named. */
1156 for (j = 0; j < node->nchildren; j++) {
1157 const ctl_named_node_t *child =
1158 ctl_named_children(node, j);
1159 if (strlen(child->name) == elen &&
1160 strncmp(elm, child->name, elen) == 0) {
1161 node = child;
1162 if (nodesp != NULL) {
1163 nodesp[i] =
1164 (const ctl_node_t *)node;
1165 }
1166 mibp[i] = j;
1167 break;
1168 }
1169 }
1170 if (node == pnode) {
1171 ret = ENOENT;
1172 goto label_return;
1173 }
1174 } else {
1175 uintmax_t index;
1176 const ctl_indexed_node_t *inode;
1177
1178 /* Children are indexed. */
1179 index = malloc_strtoumax(elm, NULL, 10);
1180 if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
1181 ret = ENOENT;
1182 goto label_return;
1183 }
1184
1185 inode = ctl_indexed_node(node->children);
1186 node = inode->index(tsdn, mibp, *depthp, (size_t)index);
1187 if (node == NULL) {
1188 ret = ENOENT;
1189 goto label_return;
1190 }
1191
1192 if (nodesp != NULL) {
1193 nodesp[i] = (const ctl_node_t *)node;
1194 }
1195 mibp[i] = (size_t)index;
1196 }
1197
1198 if (node->ctl != NULL) {
1199 /* Terminal node. */
1200 if (*dot != '\0') {
1201 /*
1202 * The name contains more elements than are
1203 * in this path through the tree.
1204 */
1205 ret = ENOENT;
1206 goto label_return;
1207 }
1208 /* Complete lookup successful. */
1209 *depthp = i + 1;
1210 break;
1211 }
1212
1213 /* Update elm. */
1214 if (*dot == '\0') {
1215 /* No more elements. */
1216 ret = ENOENT;
1217 goto label_return;
1218 }
1219 elm = &dot[1];
1220 dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
1221 strchr(elm, '\0');
1222 elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
1223 }
1224
1225 ret = 0;
1226 label_return:
1227 return ret;
1228 }
1229
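/*
 * Public entry points behind mallctl() and friends: ctl_byname() resolves a
 * name and invokes its handler in one step, ctl_nametomib() only performs the
 * name -> MIB translation, and ctl_bymib() walks a previously obtained MIB.
 * Each lazily runs ctl_init() on first use.
 */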
1230 int
1231 ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
1232 void *newp, size_t newlen) {
1233 int ret;
1234 size_t depth;
1235 ctl_node_t const *nodes[CTL_MAX_DEPTH];
1236 size_t mib[CTL_MAX_DEPTH];
1237 const ctl_named_node_t *node;
1238
1239 if (!ctl_initialized && ctl_init(tsd)) {
1240 ret = EAGAIN;
1241 goto label_return;
1242 }
1243
1244 depth = CTL_MAX_DEPTH;
1245 ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
1246 if (ret != 0) {
1247 goto label_return;
1248 }
1249
1250 node = ctl_named_node(nodes[depth-1]);
1251 if (node != NULL && node->ctl) {
1252 ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
1253 } else {
1254 /* The name refers to a partial path through the ctl tree. */
1255 ret = ENOENT;
1256 }
1257
1258 label_return:
1259 	return ret;
1260 }
1261
1262 int
1263 ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
1264 int ret;
1265
1266 if (!ctl_initialized && ctl_init(tsd)) {
1267 ret = EAGAIN;
1268 goto label_return;
1269 }
1270
1271 ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
1272 label_return:
1273 	return ret;
1274 }
1275
1276 int
1277 ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1278 size_t *oldlenp, void *newp, size_t newlen) {
1279 int ret;
1280 const ctl_named_node_t *node;
1281 size_t i;
1282
1283 if (!ctl_initialized && ctl_init(tsd)) {
1284 ret = EAGAIN;
1285 goto label_return;
1286 }
1287
1288 /* Iterate down the tree. */
1289 node = super_root_node;
1290 for (i = 0; i < miblen; i++) {
1291 assert(node);
1292 assert(node->nchildren > 0);
1293 if (ctl_named_node(node->children) != NULL) {
1294 /* Children are named. */
1295 if (node->nchildren <= mib[i]) {
1296 ret = ENOENT;
1297 goto label_return;
1298 }
1299 node = ctl_named_children(node, mib[i]);
1300 } else {
1301 const ctl_indexed_node_t *inode;
1302
1303 /* Indexed element. */
1304 inode = ctl_indexed_node(node->children);
1305 node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
1306 if (node == NULL) {
1307 ret = ENOENT;
1308 goto label_return;
1309 }
1310 }
1311 }
1312
1313 /* Call the ctl function. */
1314 if (node && node->ctl) {
1315 ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
1316 } else {
1317 /* Partial MIB. */
1318 ret = ENOENT;
1319 }
1320
1321 label_return:
1322 	return ret;
1323 }
1324
1325 bool
1326 ctl_boot(void) {
1327 if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
1328 malloc_mutex_rank_exclusive)) {
1329 return true;
1330 }
1331
1332 ctl_initialized = false;
1333
1334 return false;
1335 }
1336
1337 void
1338 ctl_prefork(tsdn_t *tsdn) {
1339 malloc_mutex_prefork(tsdn, &ctl_mtx);
1340 }
1341
1342 void
1343 ctl_postfork_parent(tsdn_t *tsdn) {
1344 malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
1345 }
1346
1347 void
1348 ctl_postfork_child(tsdn_t *tsdn) {
1349 malloc_mutex_postfork_child(tsdn, &ctl_mtx);
1350 }
1351
1352 /******************************************************************************/
1353 /* *_ctl() functions. */
1354
1355 #define READONLY() do { \
1356 if (newp != NULL || newlen != 0) { \
1357 ret = EPERM; \
1358 goto label_return; \
1359 } \
1360 } while (0)
1361
1362 #define WRITEONLY() do { \
1363 if (oldp != NULL || oldlenp != NULL) { \
1364 ret = EPERM; \
1365 goto label_return; \
1366 } \
1367 } while (0)
1368
1369 #define READ_XOR_WRITE() do { \
1370 if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
1371 newlen != 0)) { \
1372 ret = EPERM; \
1373 goto label_return; \
1374 } \
1375 } while (0)
1376
1377 #define READ(v, t) do { \
1378 if (oldp != NULL && oldlenp != NULL) { \
1379 if (*oldlenp != sizeof(t)) { \
1380 size_t copylen = (sizeof(t) <= *oldlenp) \
1381 ? sizeof(t) : *oldlenp; \
1382 memcpy(oldp, (void *)&(v), copylen); \
1383 ret = EINVAL; \
1384 goto label_return; \
1385 } \
1386 *(t *)oldp = (v); \
1387 } \
1388 } while (0)
1389
1390 #define WRITE(v, t) do { \
1391 if (newp != NULL) { \
1392 if (newlen != sizeof(t)) { \
1393 ret = EINVAL; \
1394 goto label_return; \
1395 } \
1396 (v) = *(t *)newp; \
1397 } \
1398 } while (0)
1399
1400 #define MIB_UNSIGNED(v, i) do { \
1401 if (mib[i] > UINT_MAX) { \
1402 ret = EFAULT; \
1403 goto label_return; \
1404 } \
1405 v = (unsigned)mib[i]; \
1406 } while (0)
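/*
 * READ()/WRITE() implement the oldp/oldlenp/newp/newlen protocol shared by
 * all handlers: READ() copies the current value out, flagging EINVAL on a
 * size mismatch after copying as many bytes as fit, and WRITE() copies a new
 * value in only when newp is non-NULL and newlen matches the type exactly.
 */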
1407
1408 /*
1409 * There's a lot of code duplication in the following macros due to limitations
1410 * in how nested cpp macros are expanded.
1411 */
1412 #define CTL_RO_CLGEN(c, l, n, v, t) \
1413 static int \
1414 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1415 size_t *oldlenp, void *newp, size_t newlen) { \
1416 int ret; \
1417 t oldval; \
1418 \
1419 if (!(c)) { \
1420 return ENOENT; \
1421 } \
1422 if (l) { \
1423 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1424 } \
1425 READONLY(); \
1426 oldval = (v); \
1427 READ(oldval, t); \
1428 \
1429 ret = 0; \
1430 label_return: \
1431 if (l) { \
1432 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1433 } \
1434 return ret; \
1435 }
1436
1437 #define CTL_RO_CGEN(c, n, v, t) \
1438 static int \
1439 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1440 size_t *oldlenp, void *newp, size_t newlen) { \
1441 int ret; \
1442 t oldval; \
1443 \
1444 if (!(c)) { \
1445 return ENOENT; \
1446 } \
1447 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1448 READONLY(); \
1449 oldval = (v); \
1450 READ(oldval, t); \
1451 \
1452 ret = 0; \
1453 label_return: \
1454 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1455 return ret; \
1456 }
1457
1458 #define CTL_RO_GEN(n, v, t) \
1459 static int \
1460 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1461 size_t *oldlenp, void *newp, size_t newlen) { \
1462 int ret; \
1463 t oldval; \
1464 \
1465 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1466 READONLY(); \
1467 oldval = (v); \
1468 READ(oldval, t); \
1469 \
1470 ret = 0; \
1471 label_return: \
1472 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1473 return ret; \
1474 }
1475
1476 /*
1477 * ctl_mtx is not acquired, under the assumption that no pertinent data will
1478 * mutate during the call.
1479 */
1480 #define CTL_RO_NL_CGEN(c, n, v, t) \
1481 static int \
1482 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1483 size_t *oldlenp, void *newp, size_t newlen) { \
1484 int ret; \
1485 t oldval; \
1486 \
1487 if (!(c)) { \
1488 return ENOENT; \
1489 } \
1490 READONLY(); \
1491 oldval = (v); \
1492 READ(oldval, t); \
1493 \
1494 ret = 0; \
1495 label_return: \
1496 return ret; \
1497 }
1498
1499 #define CTL_RO_NL_GEN(n, v, t) \
1500 static int \
1501 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1502 size_t *oldlenp, void *newp, size_t newlen) { \
1503 int ret; \
1504 t oldval; \
1505 \
1506 READONLY(); \
1507 oldval = (v); \
1508 READ(oldval, t); \
1509 \
1510 ret = 0; \
1511 label_return: \
1512 return ret; \
1513 }
1514
1515 #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
1516 static int \
1517 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1518 size_t *oldlenp, void *newp, size_t newlen) { \
1519 int ret; \
1520 t oldval; \
1521 \
1522 if (!(c)) { \
1523 return ENOENT; \
1524 } \
1525 READONLY(); \
1526 oldval = (m(tsd)); \
1527 READ(oldval, t); \
1528 \
1529 ret = 0; \
1530 label_return: \
1531 return ret; \
1532 }
1533
1534 #define CTL_RO_CONFIG_GEN(n, t) \
1535 static int \
1536 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1537 size_t *oldlenp, void *newp, size_t newlen) { \
1538 int ret; \
1539 t oldval; \
1540 \
1541 READONLY(); \
1542 oldval = n; \
1543 READ(oldval, t); \
1544 \
1545 ret = 0; \
1546 label_return: \
1547 return ret; \
1548 }
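/*
 * Summary of the read-only handler generators above: CTL_RO_GEN holds
 * ctl_mtx, CTL_RO_CGEN/CTL_RO_CLGEN additionally gate on a config flag (and,
 * for CLGEN, lock conditionally), the *_NL_* variants skip the lock,
 * CTL_TSD_RO_NL_CGEN reads through a tsd accessor, and CTL_RO_CONFIG_GEN
 * exposes a compile-time config constant.
 */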
1549
1550 /******************************************************************************/
1551
1552 CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
1553
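/*
 * "epoch": writing any uint64_t refreshes the cached stats snapshot via
 * ctl_refresh(); reading returns ctl_arenas->epoch, i.e. the number of
 * refreshes performed so far.
 */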
1554 static int
1555 epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1556 size_t *oldlenp, void *newp, size_t newlen) {
1557 int ret;
1558 UNUSED uint64_t newval;
1559
1560 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1561 WRITE(newval, uint64_t);
1562 if (newp != NULL) {
1563 ctl_refresh(tsd_tsdn(tsd));
1564 }
1565 READ(ctl_arenas->epoch, uint64_t);
1566
1567 ret = 0;
1568 label_return:
1569 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
1570 return ret;
1571 }
1572
1573 static int
1574 background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1575 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1576 int ret;
1577 bool oldval;
1578
1579 if (!have_background_thread) {
1580 return ENOENT;
1581 }
1582 background_thread_ctl_init(tsd_tsdn(tsd));
1583
1584 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1585 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1586 if (newp == NULL) {
1587 oldval = background_thread_enabled();
1588 READ(oldval, bool);
1589 } else {
1590 if (newlen != sizeof(bool)) {
1591 ret = EINVAL;
1592 goto label_return;
1593 }
1594 oldval = background_thread_enabled();
1595 READ(oldval, bool);
1596
1597 bool newval = *(bool *)newp;
1598 if (newval == oldval) {
1599 ret = 0;
1600 goto label_return;
1601 }
1602
1603 background_thread_enabled_set(tsd_tsdn(tsd), newval);
1604 if (newval) {
1605 if (!can_enable_background_thread) {
1606 malloc_printf("<jemalloc>: Error in dlsym("
1607 "RTLD_NEXT, \"pthread_create\"). Cannot "
1608 "enable background_thread\n");
1609 ret = EFAULT;
1610 goto label_return;
1611 }
1612 if (background_threads_enable(tsd)) {
1613 ret = EFAULT;
1614 goto label_return;
1615 }
1616 } else {
1617 if (background_threads_disable(tsd)) {
1618 ret = EFAULT;
1619 goto label_return;
1620 }
1621 }
1622 }
1623 ret = 0;
1624 label_return:
1625 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1626 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
1627
1628 return ret;
1629 }
1630
1631 static int
1632 max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1633 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1634 int ret;
1635 size_t oldval;
1636
1637 if (!have_background_thread) {
1638 return ENOENT;
1639 }
1640 background_thread_ctl_init(tsd_tsdn(tsd));
1641
1642 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1643 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1644 if (newp == NULL) {
1645 oldval = max_background_threads;
1646 READ(oldval, size_t);
1647 } else {
1648 if (newlen != sizeof(size_t)) {
1649 ret = EINVAL;
1650 goto label_return;
1651 }
1652 oldval = max_background_threads;
1653 READ(oldval, size_t);
1654
1655 size_t newval = *(size_t *)newp;
1656 if (newval == oldval) {
1657 ret = 0;
1658 goto label_return;
1659 }
1660 if (newval > opt_max_background_threads) {
1661 ret = EINVAL;
1662 goto label_return;
1663 }
1664
1665 if (background_thread_enabled()) {
1666 if (!can_enable_background_thread) {
1667 malloc_printf("<jemalloc>: Error in dlsym("
1668 "RTLD_NEXT, \"pthread_create\"). Cannot "
1669 "enable background_thread\n");
1670 ret = EFAULT;
1671 goto label_return;
1672 }
1673 background_thread_enabled_set(tsd_tsdn(tsd), false);
1674 if (background_threads_disable(tsd)) {
1675 ret = EFAULT;
1676 goto label_return;
1677 }
1678 max_background_threads = newval;
1679 background_thread_enabled_set(tsd_tsdn(tsd), true);
1680 if (background_threads_enable(tsd)) {
1681 ret = EFAULT;
1682 goto label_return;
1683 }
1684 } else {
1685 max_background_threads = newval;
1686 }
1687 }
1688 ret = 0;
1689 label_return:
1690 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1691 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
1692
1693 return ret;
1694 }
1695
1696 /******************************************************************************/
1697
1698 CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
1699 CTL_RO_CONFIG_GEN(config_debug, bool)
1700 CTL_RO_CONFIG_GEN(config_fill, bool)
1701 CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
1702 CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
1703 CTL_RO_CONFIG_GEN(config_prof, bool)
1704 CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
1705 CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
1706 CTL_RO_CONFIG_GEN(config_stats, bool)
1707 CTL_RO_CONFIG_GEN(config_utrace, bool)
1708 CTL_RO_CONFIG_GEN(config_xmalloc, bool)
1709
1710 /******************************************************************************/
1711
1712 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
1713 CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
1714 CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
1715 const char *)
1716 CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
1717 CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
1718 CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
1719 CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
1720 const char *)
1721 CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
1722 CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
1723 CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
1724 CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
1725 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
1726 CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
1727 CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
1728 CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
1729 CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
1730 CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
1731 CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
1732 CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
1733 CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
1734 size_t)
1735 CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
1736 CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
1737 CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
1738 CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
1739 CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
1740 opt_prof_thread_active_init, bool)
1741 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
1742 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
1743 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
1744 CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
1745 CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
1746 CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
1747
1748 /******************************************************************************/
1749
1750 static int
1751 thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1752 size_t *oldlenp, void *newp, size_t newlen) {
1753 int ret;
1754 arena_t *oldarena;
1755 unsigned newind, oldind;
1756
1757 oldarena = arena_choose(tsd, NULL);
1758 if (oldarena == NULL) {
1759 return EAGAIN;
1760 }
1761 newind = oldind = arena_ind_get(oldarena);
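	/*
	 * WRITE() copies the requested arena index out of newp (when one is
	 * supplied); READ() reports the thread's previous binding via oldp.
	 */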
1762 WRITE(newind, unsigned);
1763 READ(oldind, unsigned);
1764
1765 if (newind != oldind) {
1766 arena_t *newarena;
1767
1768 if (newind >= narenas_total_get()) {
1769 /* New arena index is out of range. */
1770 ret = EFAULT;
1771 goto label_return;
1772 }
1773
1774 if (have_percpu_arena &&
1775 PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
1776 if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
1777 /*
1778 * If perCPU arena is enabled, thread_arena
1779 * control is not allowed for the auto arena
1780 * range.
1781 */
1782 ret = EPERM;
1783 goto label_return;
1784 }
1785 }
1786
1787 /* Initialize arena if necessary. */
1788 newarena = arena_get(tsd_tsdn(tsd), newind, true);
1789 if (newarena == NULL) {
1790 ret = EAGAIN;
1791 goto label_return;
1792 }
1793 /* Set new arena/tcache associations. */
1794 arena_migrate(tsd, oldind, newind);
1795 if (tcache_available(tsd)) {
1796 tcache_arena_reassociate(tsd_tsdn(tsd),
1797 tsd_tcachep_get(tsd), newarena);
1798 }
1799 }
1800
1801 ret = 0;
1802 label_return:
1803 return ret;
1804 }
1805
1806 CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
1807 uint64_t)
1808 CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
1809 uint64_t *)
1810 CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
1811 uint64_t)
1812 CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
1813 tsd_thread_deallocatedp_get, uint64_t *)
1814
1815 static int
1816 thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1817 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1818 int ret;
1819 bool oldval;
1820
1821 oldval = tcache_enabled_get(tsd);
1822 if (newp != NULL) {
1823 if (newlen != sizeof(bool)) {
1824 ret = EINVAL;
1825 goto label_return;
1826 }
1827 tcache_enabled_set(tsd, *(bool *)newp);
1828 }
1829 READ(oldval, bool);
1830
1831 ret = 0;
1832 label_return:
1833 return ret;
1834 }
1835
1836 static int
1837 thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1838 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1839 int ret;
1840
1841 if (!tcache_available(tsd)) {
1842 ret = EFAULT;
1843 goto label_return;
1844 }
1845
1846 READONLY();
1847 WRITEONLY();
1848
1849 tcache_flush(tsd);
1850
1851 ret = 0;
1852 label_return:
1853 return ret;
1854 }
1855
1856 static int
1857 thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1858 size_t *oldlenp, void *newp, size_t newlen) {
1859 int ret;
1860
1861 if (!config_prof) {
1862 return ENOENT;
1863 }
1864
1865 READ_XOR_WRITE();
1866
1867 if (newp != NULL) {
1868 if (newlen != sizeof(const char *)) {
1869 ret = EINVAL;
1870 goto label_return;
1871 }
1872
1873 if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
1874 0) {
1875 goto label_return;
1876 }
1877 } else {
1878 const char *oldname = prof_thread_name_get(tsd);
1879 READ(oldname, const char *);
1880 }
1881
1882 ret = 0;
1883 label_return:
1884 return ret;
1885 }
1886
1887 static int
1888 thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1889 size_t *oldlenp, void *newp, size_t newlen) {
1890 int ret;
1891 bool oldval;
1892
1893 if (!config_prof) {
1894 return ENOENT;
1895 }
1896
1897 oldval = prof_thread_active_get(tsd);
1898 if (newp != NULL) {
1899 if (newlen != sizeof(bool)) {
1900 ret = EINVAL;
1901 goto label_return;
1902 }
1903 if (prof_thread_active_set(tsd, *(bool *)newp)) {
1904 ret = EAGAIN;
1905 goto label_return;
1906 }
1907 }
1908 READ(oldval, bool);
1909
1910 ret = 0;
1911 label_return:
1912 return ret;
1913 }
1914
1915 /******************************************************************************/
1916
1917 static int
1918 tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1919 size_t *oldlenp, void *newp, size_t newlen) {
1920 int ret;
1921 unsigned tcache_ind;
1922
1923 READONLY();
1924 if (tcaches_create(tsd, &tcache_ind)) {
1925 ret = EFAULT;
1926 goto label_return;
1927 }
1928 READ(tcache_ind, unsigned);
1929
1930 ret = 0;
1931 label_return:
1932 return ret;
1933 }
1934
1935 static int
1936 tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1937 size_t *oldlenp, void *newp, size_t newlen) {
1938 int ret;
1939 unsigned tcache_ind;
1940
1941 WRITEONLY();
1942 tcache_ind = UINT_MAX;
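	/* UINT_MAX is a sentinel: it survives WRITE() only if no index was written. */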
1943 WRITE(tcache_ind, unsigned);
1944 if (tcache_ind == UINT_MAX) {
1945 ret = EFAULT;
1946 goto label_return;
1947 }
1948 tcaches_flush(tsd, tcache_ind);
1949
1950 ret = 0;
1951 label_return:
1952 return ret;
1953 }
1954
1955 static int
1956 tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1957 size_t *oldlenp, void *newp, size_t newlen) {
1958 int ret;
1959 unsigned tcache_ind;
1960
1961 WRITEONLY();
1962 tcache_ind = UINT_MAX;
1963 WRITE(tcache_ind, unsigned);
1964 if (tcache_ind == UINT_MAX) {
1965 ret = EFAULT;
1966 goto label_return;
1967 }
1968 tcaches_destroy(tsd, tcache_ind);
1969
1970 ret = 0;
1971 label_return:
1972 return ret;
1973 }
1974
1975 /******************************************************************************/
1976
1977 static int
1978 arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1979 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1980 int ret;
1981 tsdn_t *tsdn = tsd_tsdn(tsd);
1982 unsigned arena_ind;
1983 bool initialized;
1984
1985 READONLY();
1986 MIB_UNSIGNED(arena_ind, 1);
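	/* The arena index is mib[1] of the "arena.<i>.initialized" name. */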
1987
1988 malloc_mutex_lock(tsdn, &ctl_mtx);
1989 initialized = arenas_i(arena_ind)->initialized;
1990 malloc_mutex_unlock(tsdn, &ctl_mtx);
1991
1992 READ(initialized, bool);
1993
1994 ret = 0;
1995 label_return:
1996 return ret;
1997 }
1998
1999 static void
2000 arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
2001 malloc_mutex_lock(tsdn, &ctl_mtx);
2002 {
2003 unsigned narenas = ctl_arenas->narenas;
2004
2005 /*
2006 * Access via index narenas is deprecated, and scheduled for
2007 * removal in 6.0.0.
2008 */
2009 if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
2010 unsigned i;
2011 VARIABLE_ARRAY(arena_t *, tarenas, narenas);
2012
2013 for (i = 0; i < narenas; i++) {
2014 tarenas[i] = arena_get(tsdn, i, false);
2015 }
2016
2017 /*
2018 * No further need to hold ctl_mtx, since narenas and
2019 * tarenas contain everything needed below.
2020 */
2021 malloc_mutex_unlock(tsdn, &ctl_mtx);
2022
2023 for (i = 0; i < narenas; i++) {
2024 if (tarenas[i] != NULL) {
2025 arena_decay(tsdn, tarenas[i], false,
2026 all);
2027 }
2028 }
2029 } else {
2030 arena_t *tarena;
2031
2032 assert(arena_ind < narenas);
2033
2034 tarena = arena_get(tsdn, arena_ind, false);
2035
2036 /* No further need to hold ctl_mtx. */
2037 malloc_mutex_unlock(tsdn, &ctl_mtx);
2038
2039 if (tarena != NULL) {
2040 arena_decay(tsdn, tarena, false, all);
2041 }
2042 }
2043 }
2044 }
2045
2046 static int
2047 arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2048 size_t *oldlenp, void *newp, size_t newlen) {
2049 int ret;
2050 unsigned arena_ind;
2051
2052 READONLY();
2053 WRITEONLY();
2054 MIB_UNSIGNED(arena_ind, 1);
2055 arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
2056
2057 ret = 0;
2058 label_return:
2059 return ret;
2060 }
2061
2062 static int
2063 arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2064 size_t *oldlenp, void *newp, size_t newlen) {
2065 int ret;
2066 unsigned arena_ind;
2067
2068 READONLY();
2069 WRITEONLY();
2070 MIB_UNSIGNED(arena_ind, 1);
2071 arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
2072
2073 ret = 0;
2074 label_return:
2075 return ret;
2076 }
2077
2078 static int
2079 arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
2080 void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
2081 arena_t **arena) {
2082 int ret;
2083
2084 READONLY();
2085 WRITEONLY();
2086 MIB_UNSIGNED(*arena_ind, 1);
2087
2088 *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
2089 if (*arena == NULL || arena_is_auto(*arena)) {
2090 ret = EFAULT;
2091 goto label_return;
2092 }
2093
2094 ret = 0;
2095 label_return:
2096 return ret;
2097 }
2098
2099 static void
2100 arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
2101 /* Temporarily disable the background thread during arena reset. */
2102 if (have_background_thread) {
2103 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
2104 if (background_thread_enabled()) {
2105 unsigned ind = arena_ind % ncpus;
2106 background_thread_info_t *info =
2107 &background_thread_info[ind];
2108 assert(info->state == background_thread_started);
2109 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
2110 info->state = background_thread_paused;
2111 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
2112 }
2113 }
2114 }
2115
2116 static void
2117 arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
2118 if (have_background_thread) {
2119 if (background_thread_enabled()) {
2120 unsigned ind = arena_ind % ncpus;
2121 background_thread_info_t *info =
2122 &background_thread_info[ind];
2123 assert(info->state == background_thread_paused);
2124 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
2125 info->state = background_thread_started;
2126 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
2127 }
2128 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
2129 }
2130 }
2131
2132 static int
2133 arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2134 size_t *oldlenp, void *newp, size_t newlen) {
2135 int ret;
2136 unsigned arena_ind;
2137 arena_t *arena;
2138
2139 ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
2140 newp, newlen, &arena_ind, &arena);
2141 if (ret != 0) {
2142 return ret;
2143 }
2144
2145 arena_reset_prepare_background_thread(tsd, arena_ind);
2146 arena_reset(tsd, arena);
2147 arena_reset_finish_background_thread(tsd, arena_ind);
2148
2149 return ret;
2150 }
2151
2152 static int
2153 arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2154 size_t *oldlenp, void *newp, size_t newlen) {
2155 int ret;
2156 unsigned arena_ind;
2157 arena_t *arena;
2158 ctl_arena_t *ctl_darena, *ctl_arena;
2159
2160 ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
2161 newp, newlen, &arena_ind, &arena);
2162 if (ret != 0) {
2163 goto label_return;
2164 }
2165
2166 if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
2167 true) != 0) {
2168 ret = EFAULT;
2169 goto label_return;
2170 }
2171
2172 arena_reset_prepare_background_thread(tsd, arena_ind);
2173 /* Merge stats after resetting and purging arena. */
2174 arena_reset(tsd, arena);
2175 arena_decay(tsd_tsdn(tsd), arena, false, true);
2176 ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
2177 ctl_darena->initialized = true;
2178 ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
2179 /* Destroy arena. */
2180 arena_destroy(tsd, arena);
2181 ctl_arena = arenas_i(arena_ind);
2182 ctl_arena->initialized = false;
2183 /* Record arena index for later recycling via arenas.create. */
2184 ql_elm_new(ctl_arena, destroyed_link);
2185 ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
2186 arena_reset_finish_background_thread(tsd, arena_ind);
2187
2188 assert(ret == 0);
2189 label_return:
2190 return ret;
2191 }
2192
2193 static int
2194 arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2195 size_t *oldlenp, void *newp, size_t newlen) {
2196 int ret;
2197 const char *dss = NULL;
2198 unsigned arena_ind;
2199 dss_prec_t dss_prec_old = dss_prec_limit;
2200 dss_prec_t dss_prec = dss_prec_limit;
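	/* dss_prec_limit doubles as "leave the precedence unchanged" below. */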
2201
2202 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2203 WRITE(dss, const char *);
2204 MIB_UNSIGNED(arena_ind, 1);
2205 if (dss != NULL) {
2206 int i;
2207 bool match = false;
2208
2209 for (i = 0; i < dss_prec_limit; i++) {
2210 if (strcmp(dss_prec_names[i], dss) == 0) {
2211 dss_prec = i;
2212 match = true;
2213 break;
2214 }
2215 }
2216
2217 if (!match) {
2218 ret = EINVAL;
2219 goto label_return;
2220 }
2221 }
2222
2223 /*
2224 * Access via index narenas is deprecated, and scheduled for removal in
2225 * 6.0.0.
2226 */
2227 if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
2228 ctl_arenas->narenas) {
2229 if (dss_prec != dss_prec_limit &&
2230 extent_dss_prec_set(dss_prec)) {
2231 ret = EFAULT;
2232 goto label_return;
2233 }
2234 dss_prec_old = extent_dss_prec_get();
2235 } else {
2236 arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2237 if (arena == NULL || (dss_prec != dss_prec_limit &&
2238 arena_dss_prec_set(arena, dss_prec))) {
2239 ret = EFAULT;
2240 goto label_return;
2241 }
2242 dss_prec_old = arena_dss_prec_get(arena);
2243 }
2244
2245 dss = dss_prec_names[dss_prec_old];
2246 READ(dss, const char *);
2247
2248 ret = 0;
2249 label_return:
2250 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2251 return ret;
2252 }
2253
2254 static int
2255 arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
2256 void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
2257 int ret;
2258 unsigned arena_ind;
2259 arena_t *arena;
2260
2261 MIB_UNSIGNED(arena_ind, 1);
2262 arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2263 if (arena == NULL) {
2264 ret = EFAULT;
2265 goto label_return;
2266 }
2267
2268 if (oldp != NULL && oldlenp != NULL) {
2269 size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
2270 arena_muzzy_decay_ms_get(arena);
2271 READ(oldval, ssize_t);
2272 }
2273 if (newp != NULL) {
2274 if (newlen != sizeof(ssize_t)) {
2275 ret = EINVAL;
2276 goto label_return;
2277 }
2278 if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
2279 *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
2280 arena, *(ssize_t *)newp)) {
2281 ret = EFAULT;
2282 goto label_return;
2283 }
2284 }
2285
2286 ret = 0;
2287 label_return:
2288 return ret;
2289 }
2290
2291 static int
2292 arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2293 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2294 return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2295 newlen, true);
2296 }
2297
2298 static int
2299 arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2300 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2301 return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2302 newlen, false);
2303 }
2304
2305 static int
2306 arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2307 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2308 int ret;
2309 unsigned arena_ind;
2310 arena_t *arena;
2311
2312 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2313 MIB_UNSIGNED(arena_ind, 1);
2314 if (arena_ind < narenas_total_get()) {
2315 extent_hooks_t *old_extent_hooks;
2316 arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2317 if (arena == NULL) {
2318 if (arena_ind >= narenas_auto) {
2319 ret = EFAULT;
2320 goto label_return;
2321 }
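			/*
			 * Auto arena in range but not created yet: report the
			 * default hooks; a write below initializes the arena.
			 */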
2322 old_extent_hooks =
2323 (extent_hooks_t *)&extent_hooks_default;
2324 READ(old_extent_hooks, extent_hooks_t *);
2325 if (newp != NULL) {
2326 /* Initialize a new arena as a side effect. */
2327 extent_hooks_t *new_extent_hooks
2328 JEMALLOC_CC_SILENCE_INIT(NULL);
2329 WRITE(new_extent_hooks, extent_hooks_t *);
2330 arena = arena_init(tsd_tsdn(tsd), arena_ind,
2331 new_extent_hooks);
2332 if (arena == NULL) {
2333 ret = EFAULT;
2334 goto label_return;
2335 }
2336 }
2337 } else {
2338 if (newp != NULL) {
2339 extent_hooks_t *new_extent_hooks
2340 JEMALLOC_CC_SILENCE_INIT(NULL);
2341 WRITE(new_extent_hooks, extent_hooks_t *);
2342 old_extent_hooks = extent_hooks_set(tsd, arena,
2343 new_extent_hooks);
2344 READ(old_extent_hooks, extent_hooks_t *);
2345 } else {
2346 old_extent_hooks = extent_hooks_get(arena);
2347 READ(old_extent_hooks, extent_hooks_t *);
2348 }
2349 }
2350 } else {
2351 ret = EFAULT;
2352 goto label_return;
2353 }
2354 ret = 0;
2355 label_return:
2356 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2357 return ret;
2358 }
2359
2360 static int
2361 arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2362 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2363 int ret;
2364 unsigned arena_ind;
2365 arena_t *arena;
2366
2367 if (!opt_retain) {
2368 /* Only relevant when retain is enabled. */
2369 return ENOENT;
2370 }
2371
2372 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2373 MIB_UNSIGNED(arena_ind, 1);
2374 if (arena_ind < narenas_total_get() && (arena =
2375 arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
2376 size_t old_limit, new_limit;
2377 if (newp != NULL) {
2378 WRITE(new_limit, size_t);
2379 }
2380 bool err = arena_retain_grow_limit_get_set(tsd, arena,
2381 &old_limit, newp != NULL ? &new_limit : NULL);
2382 if (!err) {
2383 READ(old_limit, size_t);
2384 ret = 0;
2385 } else {
2386 ret = EFAULT;
2387 }
2388 } else {
2389 ret = EFAULT;
2390 }
2391 label_return:
2392 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2393 return ret;
2394 }
2395
2396 static const ctl_named_node_t *
2397 arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
2398 const ctl_named_node_t *ret;
2399
2400 malloc_mutex_lock(tsdn, &ctl_mtx);
2401 switch (i) {
2402 case MALLCTL_ARENAS_ALL:
2403 case MALLCTL_ARENAS_DESTROYED:
2404 break;
2405 default:
2406 if (i > ctl_arenas->narenas) {
2407 ret = NULL;
2408 goto label_return;
2409 }
2410 break;
2411 }
2412
2413 ret = super_arena_i_node;
2414 label_return:
2415 malloc_mutex_unlock(tsdn, &ctl_mtx);
2416 return ret;
2417 }
2418
2419 /******************************************************************************/
2420
2421 static int
2422 arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2423 size_t *oldlenp, void *newp, size_t newlen) {
2424 int ret;
2425 unsigned narenas;
2426
2427 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2428 READONLY();
2429 if (*oldlenp != sizeof(unsigned)) {
2430 ret = EINVAL;
2431 goto label_return;
2432 }
2433 narenas = ctl_arenas->narenas;
2434 READ(narenas, unsigned);
2435
2436 ret = 0;
2437 label_return:
2438 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2439 return ret;
2440 }
2441
2442 static int
2443 arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
2444 void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
2445 int ret;
2446
2447 if (oldp != NULL && oldlenp != NULL) {
2448 size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
2449 arena_muzzy_decay_ms_default_get());
2450 READ(oldval, ssize_t);
2451 }
2452 if (newp != NULL) {
2453 if (newlen != sizeof(ssize_t)) {
2454 ret = EINVAL;
2455 goto label_return;
2456 }
2457 if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
2458 : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
2459 ret = EFAULT;
2460 goto label_return;
2461 }
2462 }
2463
2464 ret = 0;
2465 label_return:
2466 return ret;
2467 }
2468
2469 static int
2470 arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2471 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2472 return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2473 newlen, true);
2474 }
2475
2476 static int
2477 arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2478 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2479 return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2480 newlen, false);
2481 }
2482
2483 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
2484 CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
2485 CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
2486 CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
2487 CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
2488 CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
2489 CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
2490 CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
2491 static const ctl_named_node_t *
2492 arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
2493 if (i > NBINS) {
2494 return NULL;
2495 }
2496 return super_arenas_bin_i_node;
2497 }
2498
2499 CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
2500 CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
2501 size_t)
2502 static const ctl_named_node_t *
2503 arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2504 size_t i) {
2505 if (i > NSIZES - NBINS) {
2506 return NULL;
2507 }
2508 return super_arenas_lextent_i_node;
2509 }
2510
2511 static int
2512 arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2513 size_t *oldlenp, void *newp, size_t newlen) {
2514 int ret;
2515 extent_hooks_t *extent_hooks;
2516 unsigned arena_ind;
2517
2518 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2519
2520 extent_hooks = (extent_hooks_t *)&extent_hooks_default;
2521 WRITE(extent_hooks, extent_hooks_t *);
2522 if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
2523 ret = EAGAIN;
2524 goto label_return;
2525 }
2526 READ(arena_ind, unsigned);
2527
2528 ret = 0;
2529 label_return:
2530 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2531 return ret;
2532 }
2533
2534 static int
2535 arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2536 size_t *oldlenp, void *newp, size_t newlen) {
2537 int ret;
2538 unsigned arena_ind;
2539 void *ptr;
2540 extent_t *extent;
2541 arena_t *arena;
2542
2543 ptr = NULL;
2544 ret = EINVAL;
2545 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2546 WRITE(ptr, void *);
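	/* Resolve the written pointer to its extent, then to the owning arena. */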
2547 extent = iealloc(tsd_tsdn(tsd), ptr);
2548 if (extent == NULL)
2549 goto label_return;
2550
2551 arena = extent_arena_get(extent);
2552 if (arena == NULL)
2553 goto label_return;
2554
2555 arena_ind = arena_ind_get(arena);
2556 READ(arena_ind, unsigned);
2557
2558 ret = 0;
2559 label_return:
2560 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2561 return ret;
2562 }
2563
2564 /******************************************************************************/
2565
2566 static int
2567 prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2568 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2569 int ret;
2570 bool oldval;
2571
2572 if (!config_prof) {
2573 return ENOENT;
2574 }
2575
2576 if (newp != NULL) {
2577 if (newlen != sizeof(bool)) {
2578 ret = EINVAL;
2579 goto label_return;
2580 }
2581 oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
2582 *(bool *)newp);
2583 } else {
2584 oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
2585 }
2586 READ(oldval, bool);
2587
2588 ret = 0;
2589 label_return:
2590 return ret;
2591 }
2592
2593 static int
2594 prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2595 size_t *oldlenp, void *newp, size_t newlen) {
2596 int ret;
2597 bool oldval;
2598
2599 if (!config_prof) {
2600 return ENOENT;
2601 }
2602
2603 if (newp != NULL) {
2604 if (newlen != sizeof(bool)) {
2605 ret = EINVAL;
2606 goto label_return;
2607 }
2608 oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
2609 } else {
2610 oldval = prof_active_get(tsd_tsdn(tsd));
2611 }
2612 READ(oldval, bool);
2613
2614 ret = 0;
2615 label_return:
2616 return ret;
2617 }
2618
2619 static int
2620 prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2621 size_t *oldlenp, void *newp, size_t newlen) {
2622 int ret;
2623 const char *filename = NULL;
2624
2625 if (!config_prof) {
2626 return ENOENT;
2627 }
2628
2629 WRITEONLY();
2630 WRITE(filename, const char *);
2631
2632 if (prof_mdump(tsd, filename)) {
2633 ret = EFAULT;
2634 goto label_return;
2635 }
2636
2637 ret = 0;
2638 label_return:
2639 return ret;
2640 }
2641
2642 static int
2643 prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2644 size_t *oldlenp, void *newp, size_t newlen) {
2645 int ret;
2646 bool oldval;
2647
2648 if (!config_prof) {
2649 return ENOENT;
2650 }
2651
2652 if (newp != NULL) {
2653 if (newlen != sizeof(bool)) {
2654 ret = EINVAL;
2655 goto label_return;
2656 }
2657 oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
2658 } else {
2659 oldval = prof_gdump_get(tsd_tsdn(tsd));
2660 }
2661 READ(oldval, bool);
2662
2663 ret = 0;
2664 label_return:
2665 return ret;
2666 }
2667
2668 static int
2669 prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2670 size_t *oldlenp, void *newp, size_t newlen) {
2671 int ret;
2672 size_t lg_sample = lg_prof_sample;
2673
2674 if (!config_prof) {
2675 return ENOENT;
2676 }
2677
2678 WRITEONLY();
2679 WRITE(lg_sample, size_t);
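	/* Clamp so the sample interval (1 << lg_sample) stays within 64 bits. */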
2680 if (lg_sample >= (sizeof(uint64_t) << 3)) {
2681 lg_sample = (sizeof(uint64_t) << 3) - 1;
2682 }
2683
2684 prof_reset(tsd, lg_sample);
2685
2686 ret = 0;
2687 label_return:
2688 return ret;
2689 }
2690
2691 CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
2692 CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
2693
2694 /******************************************************************************/
2695
2696 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
2697 CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
2698 CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
2699 CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
2700 CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
2701 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
2702 CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
2703
2704 CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
2705 ctl_stats->background_thread.num_threads, size_t)
2706 CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
2707 ctl_stats->background_thread.num_runs, uint64_t)
2708 CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
2709 nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
2710
2711 CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
2712 CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
2713 ssize_t)
2714 CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
2715 ssize_t)
2716 CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
2717 CTL_RO_GEN(stats_arenas_i_uptime,
2718 nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
2719 CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
2720 CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
2721 CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
2722 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
2723 atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
2724 size_t)
2725 #if !defined(ANDROID_MINIMIZE_STRUCTS)
2726 CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
2727 atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
2728 size_t)
2729
2730 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
2731 ctl_arena_stats_read_u64(
2732 &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
2733 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
2734 ctl_arena_stats_read_u64(
2735 &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
2736 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
2737 ctl_arena_stats_read_u64(
2738 &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
2739
2740 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
2741 ctl_arena_stats_read_u64(
2742 &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
2743 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
2744 ctl_arena_stats_read_u64(
2745 &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
2746 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
2747 ctl_arena_stats_read_u64(
2748 &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
2749
2750 CTL_RO_CGEN(config_stats, stats_arenas_i_base,
2751 atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
2752 size_t)
2753 CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
2754 atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
2755 size_t)
2756 CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
2757 atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
2758 ATOMIC_RELAXED), size_t)
2759 CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
2760 atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
2761 ATOMIC_RELAXED), size_t)
2762 CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
2763 atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
2764 size_t)
2765 #endif
2766
2767 CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
2768 arenas_i(mib[2])->astats->allocated_small, size_t)
2769 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
2770 arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
2771 CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
2772 arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
2773 CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
2774 arenas_i(mib[2])->astats->nrequests_small, uint64_t)
2775 #if !defined(ANDROID_MINIMIZE_STRUCTS)
2776 CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
2777 atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
2778 ATOMIC_RELAXED), size_t)
2779 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
2780 ctl_arena_stats_read_u64(
2781 &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
2782 CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
2783 ctl_arena_stats_read_u64(
2784 &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
2785 /*
2786 * Note: "nmalloc" here instead of "nrequests" in the read. This is intentional.
2787 */
2788 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
2789 ctl_arena_stats_read_u64(
2790 &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */
2791 #endif
2792
2793 /* Lock profiling related APIs below. */
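/*
 * RO_MUTEX_CTL_GEN(n, l) emits one read-only ctl per counter kept in the
 * mutex profiling data l: lock ops, wait count, total and max wait times,
 * spin acquires, owner switches, and max concurrent waiters.
 */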
2794 #define RO_MUTEX_CTL_GEN(n, l) \
2795 CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
2796 l.n_lock_ops, uint64_t) \
2797 CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
2798 l.n_wait_times, uint64_t) \
2799 CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
2800 l.n_spin_acquired, uint64_t) \
2801 CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
2802 l.n_owner_switches, uint64_t) \
2803 CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
2804 nstime_ns(&l.tot_wait_time), uint64_t) \
2805 CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
2806 nstime_ns(&l.max_wait_time), uint64_t) \
2807 CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
2808 l.max_n_thds, uint32_t)
2809
2810 /* Global mutexes. */
2811 #define OP(mtx) \
2812 RO_MUTEX_CTL_GEN(mutexes_##mtx, \
2813 ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
2814 MUTEX_PROF_GLOBAL_MUTEXES
2815 #undef OP
2816
2817 #if !defined(ANDROID_MINIMIZE_STRUCTS)
2818 /* Per arena mutexes */
2819 #define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
2820 arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
2821 MUTEX_PROF_ARENA_MUTEXES
2822 #undef OP
2823 #endif
2824
2825 /* tcache bin mutex */
2826 RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
2827 arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
2828 #undef RO_MUTEX_CTL_GEN
2829
2830 /* Resets all mutex stats, including global, arena and bin mutexes. */
2831 static int
2832 stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2833 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2834 if (!config_stats) {
2835 return ENOENT;
2836 }
2837
2838 tsdn_t *tsdn = tsd_tsdn(tsd);
2839
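/* Each mutex is held while its own profiling data is reset. */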
2840 #define MUTEX_PROF_RESET(mtx) \
2841 malloc_mutex_lock(tsdn, &mtx); \
2842 malloc_mutex_prof_data_reset(tsdn, &mtx); \
2843 malloc_mutex_unlock(tsdn, &mtx);
2844
2845 /* Global mutexes: ctl and prof. */
2846 MUTEX_PROF_RESET(ctl_mtx);
2847 if (have_background_thread) {
2848 MUTEX_PROF_RESET(background_thread_lock);
2849 }
2850 if (config_prof && opt_prof) {
2851 MUTEX_PROF_RESET(bt2gctx_mtx);
2852 }
2853
2854
2855 /* Per arena mutexes. */
2856 unsigned n = narenas_total_get();
2857
2858 for (unsigned i = 0; i < n; i++) {
2859 arena_t *arena = arena_get(tsdn, i, false);
2860 if (!arena) {
2861 continue;
2862 }
2863 MUTEX_PROF_RESET(arena->large_mtx);
2864 MUTEX_PROF_RESET(arena->extent_avail_mtx);
2865 MUTEX_PROF_RESET(arena->extents_dirty.mtx);
2866 MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
2867 MUTEX_PROF_RESET(arena->extents_retained.mtx);
2868 MUTEX_PROF_RESET(arena->decay_dirty.mtx);
2869 MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
2870 #if defined(ANDROID_ENABLE_TCACHE)
2871 MUTEX_PROF_RESET(arena->tcache_ql_mtx);
2872 #endif
2873 MUTEX_PROF_RESET(arena->base->mtx);
2874
2875 for (szind_t i = 0; i < NBINS; i++) {
2876 bin_t *bin = &arena->bins[i];
2877 MUTEX_PROF_RESET(bin->lock);
2878 }
2879 }
2880 #undef MUTEX_PROF_RESET
2881 return 0;
2882 }
2883
2884 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
2885 arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
2886 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
2887 arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
2888 #if !defined(ANDROID_MINIMIZE_STRUCTS)
2889 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
2890 arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
2891 #endif
2892 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
2893 arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
2894 #if !defined(ANDROID_MINIMIZE_STRUCTS)
2895 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
2896 arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
2897 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
2898 arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
2899 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
2900 arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
2901 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
2902 arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
2903 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
2904 arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
2905 #endif
2906
2907 static const ctl_named_node_t *
2908 stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2909 size_t j) {
2910 if (j > NBINS) {
2911 return NULL;
2912 }
2913 return super_stats_arenas_i_bins_j_node;
2914 }
2915
2916 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
2917 ctl_arena_stats_read_u64(
2918 &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
2919 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
2920 ctl_arena_stats_read_u64(
2921 &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
2922 #if !defined(ANDROID_MINIMIZE_STRUCTS)
2923 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
2924 ctl_arena_stats_read_u64(
2925 &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
2926 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
2927 arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
2928 #endif
2929
2930 static const ctl_named_node_t *
2931 stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2932 size_t j) {
2933 if (j > NSIZES - NBINS) {
2934 return NULL;
2935 }
2936 return super_stats_arenas_i_lextents_j_node;
2937 }
2938
2939 static const ctl_named_node_t *
2940 stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
2941 const ctl_named_node_t *ret;
2942 size_t a;
2943
2944 malloc_mutex_lock(tsdn, &ctl_mtx);
2945 a = arenas_i2a_impl(i, true, true);
2946 if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
2947 ret = NULL;
2948 goto label_return;
2949 }
2950
2951 ret = super_stats_arenas_i_node;
2952 label_return:
2953 malloc_mutex_unlock(tsdn, &ctl_mtx);
2954 return ret;
2955 }
2956