// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
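//
// A minimal usage sketch (these are the entry points declared in
// low_level_alloc.h and defined below):
//
//   LowLevelAlloc::Arena *arena = LowLevelAlloc::NewArena(0);
//   void *p = LowLevelAlloc::AllocWithArena(128, arena);
//   // ... use p ...
//   LowLevelAlloc::Free(p);
//   LowLevelAlloc::DeleteArena(arena);  // returns false while blocks remain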

#include "absl/base/internal/low_level_alloc.h"

#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

#ifdef __linux__
#include <sys/prctl.h>
#endif

#include <string.h>

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>  // for placement-new

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {
  struct Header {
    // Size of entire region, including this field. Must be
    // first. Valid in both allocated and unallocated blocks.
    uintptr_t size;

    // kMagicAllocated or kMagicUnallocated xor this.
    uintptr_t magic;

    // Pointer to parent arena.
    LowLevelAlloc::Arena *arena;

    // Aligns regions to 0 mod 2*sizeof(void*).
    void *dummy_for_alignment;
  } header;

  // Next two fields: in unallocated blocks: freelist skiplist data
  //                  in allocated blocks: overlaps with client data

  // Levels in skiplist used.
  int levels;

  // Actually has levels elements. The AllocList node may not have room
  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
  AllocList *next[kMaxLevel];
};
}  // namespace

// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
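// The skiplist head is the Arena's freelist field below; the list nodes are
// the free blocks themselves, linked in address order through their
// AllocList::next arrays.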

// An integer approximation of log2(size/base)
// Requires size >= base.
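// For example, IntLog2(4096, 64) == 6, since floor(4096 / 2**6) == 64.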
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
    result++;
  }
  // floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // => log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}

// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
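// (The value is 1 plus a geometrically distributed run of zero bits drawn
// from bit 30 of a simple linear congruential generator, so its expected
// value is 2.)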
static int Random(uint32_t *state) {
  uint32_t r = *state;
  int result = 1;
  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  *state = r;
  return result;
}

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is nullptr and otherwise a random number generated
// with the standard distribution for a skiplist: see Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x, y, nullptr)
//     <= LLA_SkiplistLevels(x, y, random) < kMaxLevel
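// For example, on a typical 64-bit build (where an Arena's min_size is 64;
// see RoundedUpBlockSize() below), a 4096-byte free block gets
// IntLog2(4096, 64) == 6 base levels plus the random increment.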
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size. We can't return more than max_fit, no matter what the
  // random number generator says.
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}

// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
                                     AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
}

// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
    prev[head->levels] = head;                        // to all *e's levels
  }
  for (int i = 0; i != e->levels; i++) {  // add element to list
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}

// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  ABSL_RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;  // reduce head->levels if level unused
  }
}

// ---------------------------------------------------------------------------
// Arena implementation

// Metadata for a LowLevelAlloc arena instance.
struct LowLevelAlloc::Arena {
  // Constructs an arena with the given LowLevelAlloc flags.
  explicit Arena(uint32_t flags_value);

  base_internal::SpinLock mu;
  // Head of free list, sorted by address
  AllocList freelist ABSL_GUARDED_BY(mu);
  // Count of allocated blocks
  int32_t allocation_count ABSL_GUARDED_BY(mu);
  // flags passed to NewArena
  const uint32_t flags;
  // Result of GetPageSize()
  const size_t pagesize;
  // Lowest power of two >= max(16, sizeof(AllocList::Header))
  const size_t round_up;
  // Smallest allocation block size
  const size_t min_size;
  // PRNG state
  uint32_t random ABSL_GUARDED_BY(mu);
};

namespace {
// Static storage space for the lazily-constructed, default global arena
// instances. We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
alignas(
    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
    [sizeof(LowLevelAlloc::Arena)];
#endif

// We must use LowLevelCallOnce here to construct the global arenas, rather
// than using function-level statics, to avoid recursively invoking the
// scheduler.
absl::once_flag create_globals_once;

void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}

// Returns a global arena that does not call into hooks. Used by NewArena()
// when kCallMallocHook is not set.
LowLevelAlloc::Arena *UnhookedArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
}

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Returns a global arena that is async-signal safe. Used by NewArena() when
// kAsyncSignalSafe is set.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(
      &unhooked_async_sig_safe_arena_storage);
}
#endif

}  // namespace

// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
}

// magic numbers to identify allocated and unallocated blocks
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
class ABSL_SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      sigset_t all;
      sigfillset(&all);
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
    }
#endif
    arena_->mu.Lock();
  }
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
  void Leave() ABSL_UNLOCK_FUNCTION() {
    arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (mask_valid_) {
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
      if (err != 0) {
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
      }
    }
#endif
    left_ = true;
  }

 private:
  bool left_ = false;  // whether Leave() has been called
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  bool mask_valid_ = false;
  sigset_t mask_;  // old mask of blocked signals
#endif
  LowLevelAlloc::Arena *arena_;
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
};
}  // namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
}

namespace {
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
  return getpagesize();
#else
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
}

size_t RoundedUpBlockSize() {
  // Round up block sizes to a power of two close to the header size.
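  // With the four pointer-sized Header fields, this yields 32 bytes on a
  // typical 64-bit build and 16 bytes on a 32-bit build.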
  size_t round_up = 16;
  while (round_up < sizeof(AllocList::Header)) {
    round_up += round_up;
  }
  return round_up;
}

}  // namespace

LowLevelAlloc::Arena::Arena(uint32_t flags_value)
    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
      allocation_count(0),
      flags(flags_value),
      pagesize(GetPageSize()),
      round_up(RoundedUpBlockSize()),
      min_size(2 * round_up),
      random(0) {
  freelist.header.size = 0;
  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  freelist.levels = 0;
  memset(freelist.next, 0, sizeof(freelist.next));
}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
  Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    meta_data_arena = UnhookedAsyncSigSafeArena();
  } else  // NOLINT(readability/braces)
#endif
      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
    meta_data_arena = UnhookedArena();
  }
  Arena *result =
      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
  return result;
}

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  ABSL_RAW_CHECK(
      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
      "may not delete default arena");
  ArenaLock section(arena);
  if (arena->allocation_count != 0) {
    section.Leave();
    return false;
  }
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
    ABSL_RAW_CHECK(
        region->header.magic == Magic(kMagicUnallocated, &region->header),
        "bad magic number in DeleteArena()");
    ABSL_RAW_CHECK(region->header.arena == arena,
                   "bad arena pointer in DeleteArena()");
    ABSL_RAW_CHECK(size % arena->pagesize == 0,
                   "empty arena has non-page-aligned block size");
    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
                   "empty arena has non-page-aligned block");
    int munmap_result;
#ifdef _WIN32
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (munmap_result != 0) {
      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                   errno);
    }
#endif  // _WIN32
  }
  section.Leave();
  arena->~Arena();
  Free(arena);
  return true;
}

// ---------------------------------------------------------------------------

// Addition, checking for overflow. The intent is to die if an external client
// manages to push through a request that would cause arithmetic to fail.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
  uintptr_t sum = a + b;
  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
  return sum;
}

// Return value rounded up to next multiple of align.
// align must be a power of two.
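// e.g. RoundUp(37, 16) == 48.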
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
  return CheckedAdd(addr, align - 1) & ~(align - 1);
}

// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L >= arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == Magic(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = nullptr;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels =
        LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}

// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                               sizeof(f->header));
  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  ABSL_RAW_CHECK(f->header.arena == arena,
                 "bad arena pointer in AddToFreelist()");
  f->levels =
      LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);        // maybe coalesce with successor
  Coalesce(prev[0]);  // maybe coalesce with predecessor
}

// Frees storage allocated by LowLevelAlloc::Alloc() or AllocWithArena().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != nullptr) {
    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                                 sizeof(f->header));
    LowLevelAlloc::Arena *arena = f->header.arena;
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}

// Allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = nullptr;
  if (request != 0) {
    AllocList *s;  // will point to region that satisfies request
    ArenaLock section(arena);
    // round up with header
    size_t req_rnd =
        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
    for (;;) {  // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
      if (i < arena->freelist.levels) {  // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != nullptr &&
               s->header.size < req_rnd) {
          before = s;
        }
        if (s != nullptr) {  // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
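      // (with the common 4 KiB page size, pagesize * 16 below is 64 KiB)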
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
#ifdef _WIN32
      new_pages = VirtualAlloc(nullptr, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
                                              PROT_WRITE | PROT_READ,
                                              MAP_ANONYMOUS | MAP_PRIVATE, -1,
                                              0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if (new_pages == MAP_FAILED) {
        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
      }

#ifdef __linux__
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      // Attempt to name the allocated address range in /proc/$PID/smaps on
      // Linux.
      //
      // This invocation of prctl() may fail if the Linux kernel was not
      // configured with the CONFIG_ANON_VMA_NAME option. This is OK since
      // the naming of arenas is primarily a debugging aid.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
            "absl");
#endif
#endif  // __linux__
#endif  // _WIN32
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);  // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
    // s points to the first free region that's big enough
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
      // big enough to split
      AllocList *n =
          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    ABSL_RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
  return result;
}

void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, DefaultArena());
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  return result;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING