xref: /aosp_15_r20/external/musl/src/malloc/lite_malloc.c (revision c9945492fdd68bbe62686c5b452b4dc1be3f8453)
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "lock.h"
#include "syscall.h"
#include "fork_impl.h"

#define ALIGN 16

/* This function returns true if the interval [old,new]
 * intersects the 'len'-sized interval below &libc.auxv
 * (interpreted as the main-thread stack) or below &b
 * (the current stack). It is used to defend against
 * buggy brk implementations that can cross the stack. */
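/* Illustrative example (addresses are arbitrary): with
 * b = 0x7f0000800000 and len = 8<<20, a = 0x7f0000000000, so growing
 * the heap from old = 0x7effffff0000 to new = 0x7f0000100000 gives
 * new>a && old<b and the brk extension is refused; a heap that stays
 * at or below a does not trigger the check. */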

static int traverses_stack_p(uintptr_t old, uintptr_t new)
{
	const uintptr_t len = 8<<20;
	uintptr_t a, b;

	b = (uintptr_t)libc.auxv;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	b = (uintptr_t)&b;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	return 0;
}

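/* Bump-allocator lock. __bump_lockptr exposes it to the fork code
 * (see fork_impl.h) so the lock can be taken across fork() and the
 * allocator state stays consistent in the child. */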
static volatile int lock[1];
volatile int *const __bump_lockptr = lock;

static void *__simple_malloc(size_t n)
{
	static uintptr_t brk, cur, end;
	static unsigned mmap_step;
	size_t align=1;
	void *p;

	if (n > SIZE_MAX/2) {
		errno = ENOMEM;
		return 0;
	}

	if (!n) n++;
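	/* Pick the smallest power-of-two alignment that is >= n,
	 * capped at ALIGN (16). */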
	while (align<n && align<ALIGN)
		align += align;

	LOCK(lock);

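	/* Round cur up to the next multiple of align: -cur & (align-1)
	 * is the distance to that boundary (note '&' binds looser than
	 * '-', so align-1 is evaluated first). E.g. cur=0x1003, align=16
	 * yields cur=0x1010. */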
	cur += -cur & align-1;

	if (n > end-cur) {
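		/* Round the shortfall up to a whole number of pages. */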
		size_t req = n - (end-cur) + PAGE_SIZE-1 & -PAGE_SIZE;

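		/* First use: query the current program break and round it
		 * up to a page boundary; the bump region starts empty. */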
		if (!cur) {
			brk = __syscall(SYS_brk, 0);
			brk += -brk & PAGE_SIZE-1;
			cur = end = brk;
		}

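		/* Grow the heap with brk only if the bump region still ends
		 * at the break, brk+req cannot overflow, and the extension
		 * does not run into a stack. */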
		if (brk == end && req < SIZE_MAX-brk
		    && !traverses_stack_p(brk, brk+req)
		    && __syscall(SYS_brk, brk+req)==brk+req) {
			brk = end += req;
		} else {
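			/* brk unavailable or unusable: fall back to an
			 * anonymous mmap, re-rounding just n since the new
			 * mapping is not contiguous with the old region. */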
			int new_area = 0;
			req = n + PAGE_SIZE-1 & -PAGE_SIZE;
			/* Only make a new area rather than individual mmap
			 * if wasted space would be over 1/8 of the map. */
			if (req-n > req/8) {
				/* Geometric area size growth up to 64 pages,
				 * bounding waste by 1/8 of the area. */
				size_t min = PAGE_SIZE<<(mmap_step/2);
				if (min-n > end-cur) {
					if (req < min) {
						req = min;
						if (mmap_step < 12)
							mmap_step++;
					}
					new_area = 1;
				}
			}
			void *mem = __mmap(0, req, PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
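			/* On failure return 0; a one-off mapping is returned
			 * directly without touching cur/end; otherwise the
			 * mapping becomes the new bump area. */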
			if (mem == MAP_FAILED || !new_area) {
				UNLOCK(lock);
				return mem==MAP_FAILED ? 0 : mem;
			}
			cur = (uintptr_t)mem;
			end = cur + req;
		}
	}

	p = (void *)cur;
	cur += n;
	UNLOCK(lock);
	return p;
}

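/* Dispatch scheme: malloc is a weak alias for default_malloc, so a
 * malloc interposed at link time replaces the public symbol, while
 * __libc_malloc always reaches the libc allocator. __libc_malloc_impl
 * is only weakly bound to the bump allocator above and is overridden
 * when a full malloc implementation is linked in. */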
weak_alias(__simple_malloc, __libc_malloc_impl);

void *__libc_malloc(size_t n)
{
	return __libc_malloc_impl(n);
}

static void *default_malloc(size_t n)
{
	return __libc_malloc_impl(n);
}

weak_alias(default_malloc, malloc);
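
/* Illustrative sketch of the bump behavior, assuming malloc resolves to
 * default_malloc and __libc_malloc_impl is still this file's
 * __simple_malloc (e.g. a static link that never pulls in the full
 * allocator):
 *
 *	char *a = malloc(10);	// creates a page-aligned bump region
 *	char *b = malloc(10);	// same region; cur is rounded up to the
 *				// 16-byte ALIGN boundary, so b == a + 16
 *
 * Nothing here releases memory: the region only grows via brk/mmap. */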
119