#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/size_classes.h"

/*
 * On architectures that support 64-bit atomics, we use atomic updates for our
 * 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* The arena stats mutex must be held while reading or updating these. */
typedef uint64_t arena_stats_u64_t;
#endif
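
/*
 * Illustrative contract (a sketch, not code from this header): without
 * 64-bit atomics, every read of an arena_stats_u64_t must happen with the
 * stats mutex held, e.g.
 *
 *	arena_stats_lock(tsdn, stats);
 *	uint64_t npurge = arena_stats_read_u64(tsdn, stats,
 *	    &stats->decay_dirty.npurge);
 *	arena_stats_unlock(tsdn, stats);
 *
 * where stats is an assumed arena_stats_t pointer.  With JEMALLOC_ATOMIC_U64
 * defined, the lock/unlock calls compile to no-ops and the read is a single
 * relaxed atomic load (see the accessors below).
 */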

typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	arena_stats_u64_t	nmalloc;
	arena_stats_u64_t	ndalloc;

#if !defined(ANDROID_MINIMIZE_STRUCTS)
	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	arena_stats_u64_t	nrequests; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t			curlextents; /* Derived. */
#endif
};
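
/*
 * Illustrative sketch (assumed merge-time logic, not part of this header):
 * "derived" fields are not maintained on the fast path; a stats-merge reader
 * can reconstruct curlextents from the counters that are maintained, e.g.
 *
 *	uint64_t nmalloc = arena_stats_read_u64(tsdn, stats,
 *	    &stats->lstats[i].nmalloc);
 *	uint64_t ndalloc = arena_stats_read_u64(tsdn, stats,
 *	    &stats->lstats[i].ndalloc);
 *	size_t curlextents = (size_t)(nmalloc - ndalloc);
 *
 * Here stats and i are hypothetical locals of the merging code.
 */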

typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
	/* Total number of purge sweeps. */
	arena_stats_u64_t	npurge;
	/* Total number of madvise calls made. */
	arena_stats_u64_t	nmadvise;
	/* Total number of pages purged. */
	arena_stats_u64_t	purged;
};

/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t		mtx;
#endif

	/* Number of bytes currently mapped, excluding retained memory. */
	atomic_zu_t		mapped; /* Partially derived. */

#if !defined(ANDROID_MINIMIZE_STRUCTS)
	/*
	 * Number of unused virtual memory bytes currently retained.  Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the mapped statistic (above).
	 */
	atomic_zu_t		retained; /* Derived. */

	arena_stats_decay_t	decay_dirty;
	arena_stats_decay_t	decay_muzzy;

	atomic_zu_t		base; /* Derived. */
	atomic_zu_t		internal;
	atomic_zu_t		resident; /* Derived. */
	atomic_zu_t		metadata_thp;

	atomic_zu_t		allocated_large; /* Derived. */
	arena_stats_u64_t	nmalloc_large; /* Derived. */
	arena_stats_u64_t	ndalloc_large; /* Derived. */
	arena_stats_u64_t	nrequests_large; /* Derived. */

	/* Number of bytes cached in tcache associated with this arena. */
	atomic_zu_t		tcache_bytes; /* Derived. */

	mutex_prof_data_t	mutex_prof_data[mutex_prof_num_arena_mutexes];
#endif

	/* One element for each large size class. */
	arena_stats_large_t	lstats[NSIZES - NBINS];

	/* Arena uptime. */
	nstime_t		uptime;
};

static inline bool
arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}
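
/*
 * Usage sketch (hypothetical caller): the struct must arrive zeroed (e.g.
 * freshly base-allocated), and initialization can fail only on platforms
 * that need the fallback mutex, so an arena constructor would simply
 * propagate failure:
 *
 *	if (arena_stats_init(tsdn, &arena->stats)) {
 *		return NULL;
 *	}
 *
 * where arena is an assumed arena_t under construction.
 */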

static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}

static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}

UNUSED static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p -= x;
	assert(*p + x >= *p);
#endif
}
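
/*
 * Illustrative pairing (an assumed caller, not part of this header): a large
 * allocation path bumps nmalloc under the stats lock, and the matching
 * deallocation path increments ndalloc the same way:
 *
 *	arena_stats_lock(tsdn, &arena->stats);
 *	arena_stats_add_u64(tsdn, &arena->stats,
 *	    &arena->stats.lstats[szind - NBINS].nmalloc, 1);
 *	arena_stats_unlock(tsdn, &arena->stats);
 *
 * On JEMALLOC_ATOMIC_U64 platforms this reduces to one relaxed fetch-add;
 * otherwise the mutex serializes the read-modify-write.
 */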

/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
	*dst += src;
#endif
}
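
/*
 * Example of the intended single-writer use (a hypothetical merge loop):
 * when totaling per-size-class counters into the arena-wide "derived"
 * fields, only the merging thread touches the destination, so the cheaper
 * load/store pair suffices:
 *
 *	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
 *		arena_stats_accum_u64(&arena_stats->nmalloc_large,
 *		    arena_stats_read_u64(tsdn, arena_stats,
 *		    &arena_stats->lstats[i].nmalloc));
 *	}
 */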

static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
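
/*
 * Usage sketch (assumed merge-time accounting; extents_npages_get and the
 * arena fields are assumptions about surrounding code, shown only for call
 * shape): size_t stats accumulate the same way, e.g. folding retained pages
 * into the derived byte counter with the caller providing synchronization:
 *
 *	arena_stats_accum_zu(&arena_stats->retained,
 *	    extents_npages_get(&arena->extents_retained) << LG_PAGE);
 */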

static inline void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
#if !defined(ANDROID_MINIMIZE_STRUCTS)
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
	    NBINS].nrequests, nrequests);
	arena_stats_unlock(tsdn, arena_stats);
#endif
}
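
/*
 * Illustrative call site (an assumption about the caller, consistent with
 * the nrequests comment above): a tcache flushing a batch of large objects
 * merges its locally buffered request count in one shot rather than paying
 * a stats update per allocation:
 *
 *	arena_stats_large_nrequests_add(tsdn, &arena->stats, binind,
 *	    tbin->tstats.nrequests);
 *	tbin->tstats.nrequests = 0;
 *
 * binind and tbin are hypothetical tcache-side names.
 */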

static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */