#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for the current epoch.  This is the sum of interval and
	 * per-epoch jitter, which is a uniform random variable in
	 * [0..interval).  Epochs always advance by precise multiples of
	 * interval, but we randomize the deadline to reduce the likelihood of
	 * arenas purging in lockstep.  (See the illustrative sketch following
	 * this struct definition.)
	 */
	nstime_t		deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];

#if !defined(ANDROID_MINIMIZE_STRUCTS)
	/*
	 * Pointer to associated stats.  These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	arena_stats_decay_t	*stats;
	/* Peak number of pages in associated extents.  Used for debug only. */
	uint64_t		ceil_npages;
#endif
};
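
/*
 * Illustrative sketch (not compiled, and not part of upstream jemalloc): one
 * plausible way the per-epoch deadline described above could be derived, i.e.
 * epoch + interval plus uniform jitter in [0..interval) drawn from
 * jitter_state.  The nstime_* and prng_range_u64() helpers are assumed to
 * behave as in jemalloc's nstime.h and prng.h.
 */
#if 0
static void
arena_decay_deadline_sketch(struct arena_decay_s *decay) {
	nstime_t jitter;

	/* Start from the beginning of the current epoch plus one interval. */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	/* Add uniform jitter in [0..interval) to avoid lockstep purging. */
	nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
	    nstime_ns(&decay->interval)));
	nstime_add(&decay->deadline, &jitter);
}
#endif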

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.  (A sketch of the atomic accessor pattern
	 * used for fields like this follows this struct definition.)
	 */
	atomic_u_t		nthreads[2];

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t		*last_thd;

	/* Synchronization: internal. */
	arena_stats_t		stats;

#if defined(ANDROID_ENABLE_TCACHE)
	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;
#endif

#if !defined(ANDROID_MINIMIZE_STRUCTS)
	/* Synchronization: internal. */
	prof_accum_t		prof_accum;
	uint64_t		prof_accumbytes;
#endif

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		offset_state;

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		extent_sn_next;

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Collections of extents that were previously allocated.  These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain).  This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit is the maximum allowed size index to grow to
	 * (unless the required size is greater).  There is no limit by
	 * default, and it is controlled through mallctl only.
	 *
	 * Synchronization: extent_grow_mtx.
	 */
	pszind_t		extent_grow_next;
	pszind_t		retain_grow_limit;
	malloc_mutex_t		extent_grow_mtx;

	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t		extent_avail;
	malloc_mutex_t		extent_avail_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	bin_t			bins[NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;
};
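
/*
 * Illustrative sketch (not compiled, and not part of upstream jemalloc): the
 * accessor pattern implied by the "Synchronization: atomic" fields above,
 * shown for nthreads and dss_prec.  atomic_load_u() and the memory-order
 * constants are assumed to behave as in jemalloc's atomic.h; the arena_t
 * typedef for struct arena_s comes from arena_types.h.
 */
#if 0
static inline unsigned
arena_nthreads_sketch(arena_t *arena, bool internal) {
	/* Index 0 counts application threads, index 1 internal metadata. */
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

static inline dss_prec_t
arena_dss_prec_sketch(arena_t *arena) {
	/* dss_prec is stored as an unsigned, so cast back to the enum. */
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}
#endif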

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};
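
/*
 * Illustrative sketch (not compiled, and not part of upstream jemalloc): how
 * the per-thread decay_ticker above is typically consumed.  Allocation events
 * tick the ticker, and once it fires the arena's decay machinery runs.
 * ticker_ticks() is assumed to behave as in jemalloc's ticker.h, and
 * arena_decay() as declared in arena_externs.h.
 */
#if 0
static inline void
arena_decay_ticks_sketch(tsdn_t *tsdn, arena_t *arena, arena_tdata_t *tdata,
    unsigned nticks) {
	/* ticker_ticks() returns true once nticks events have accumulated. */
	if (ticker_ticks(&tdata->decay_ticker, nticks)) {
		arena_decay(tsdn, arena, false, false);
	}
}
#endif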

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t szind;
	bool slab;
};
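
/*
 * Illustrative sketch (not compiled, and not part of upstream jemalloc): how
 * an alloc_ctx_t is typically populated on the deallocation path, by reading
 * the size-class index and slab flag for a pointer out of the global extents
 * rtree.  rtree_szind_slab_read(), tsd_rtree_ctx(), and extents_rtree are
 * assumed to behave as in jemalloc's rtree.h and extent code.
 */
#if 0
static inline void
alloc_ctx_lookup_sketch(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx) {
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);

	/* dependent=true: the pointer is known to be an active allocation. */
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx->szind, &alloc_ctx->slab);
}
#endif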

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */