// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2009 IBM Corporation.
 * Author: David Gibson
 */

/*\
 * [Description]
 *
 * The kernel used to have bugs in mremap() on some architectures:
 * mremap() could cause crashes on architectures with holes in the
 * address space (like ia64) and on powerpc with its distinct page-size
 * slices.
 *
 * This test performs mremap() with normal and huge pages around the
 * powerpc slice boundary.
 */

#define _GNU_SOURCE
#include "hugetlb.h"

#define RANDOM_CONSTANT 0x1234ABCD
#define MNTPOINT "hugetlbfs/"
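
/*
 * do_readback(), used throughout run_test(), is provided by hugetlb.h; it
 * is assumed to fill a mapping with a test pattern (presumably derived
 * from RANDOM_CONSTANT) and verify that the pattern reads back intact.
 */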

static int fd = -1;
static unsigned long slice_boundary;
static unsigned long hpage_size, page_size;

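/*
 * Walk candidate slice boundaries until a 4-hugepage MAP_FIXED probe
 * spanning one of them succeeds, i.e. two huge pages are free on each
 * side of the boundary. Returns 0 on success, -1 if no suitable
 * boundary is found.
 */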
static int init_slice_boundary(int fd)
{
	unsigned long slice_size;
	void *p, *heap;
	int i;
#if defined(__LP64__) && !defined(__aarch64__) && !defined(__loongarch__)
	/* powerpc: 1TB slices starting at 1TB */
	slice_boundary = 0x10000000000;
	slice_size = 0x10000000000;
#else
	/* powerpc: 256MB slices up to 4GB */
	slice_boundary = 0x00000000;
	slice_size = 0x10000000;
#endif

	/* dummy malloc so we know where the heap is */
	heap = malloc(1);

	/* Avoid underflow on systems with large huge pages.
	 * Keeping the boundary above the heap address also reduces the
	 * chance of MAP_FIXED stomping over existing mappings.
	 */
	while (slice_boundary + slice_size < (unsigned long)heap + 2*hpage_size)
		slice_boundary += slice_size;

	free(heap);

	/* Find two neighbouring slices with a couple of huge pages free
	 * around the slice boundary.
	 * 16 is the maximum number of slices (low/high).
	 */
	for (i = 0; i < 16-1; i++) {
		slice_boundary += slice_size;
		p = mmap((void *)(slice_boundary-2*hpage_size), 4*hpage_size,
			PROT_READ, MAP_SHARED | MAP_FIXED, fd, 0);
		if (p == MAP_FAILED) {
			tst_res(TINFO|TERRNO, "can't use slice_boundary: 0x%lx",
					slice_boundary);
		} else {
			SAFE_MUNMAP(p, 4*hpage_size);
			break;
		}
	}

	if (p == MAP_FAILED) {
		tst_res(TFAIL|TERRNO, "couldn't find 2 free neighbour slices");
		return -1;
	}

	tst_res(TINFO, "using slice_boundary: 0x%lx", slice_boundary);

	return 0;
}

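/*
 * Map a huge page on one side of the slice boundary and a normal page on
 * the other, then try to grow the lower mapping across the boundary with
 * mremap(). The kernel may legitimately refuse the expansion, but it must
 * not move the mapping (MREMAP_MAYMOVE is unset), corrupt the data, or
 * crash. Both orientations are exercised.
 */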
static void run_test(void)
{
	void *p = NULL, *q = NULL, *r;
	long p_size, q_size;
	int ret;

	fd = tst_creat_unlinked(MNTPOINT, 0);
	ret = init_slice_boundary(fd);
	if (ret)
		goto cleanup;

	/* First, hugepages above, normal below */
	tst_res(TINFO, "Testing with hpage above & normal below the slice_boundary");
	p_size = hpage_size;
	p = SAFE_MMAP((void *)(slice_boundary + hpage_size), p_size,
		 PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);

	ret = do_readback(p, p_size, "huge above");
	if (ret)
		goto cleanup;

	q_size = page_size;
	q = SAFE_MMAP((void *)(slice_boundary - page_size), q_size,
		 PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	ret = do_readback(q, q_size, "normal below");
	if (ret)
		goto cleanup;

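	/*
	 * Try to grow the normal mapping in place so that it crosses into
	 * the hugepage slice. Without MREMAP_MAYMOVE the kernel must
	 * either refuse the request or expand at the same address; moving
	 * the mapping or crashing would be a bug.
	 */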
	r = mremap(q, page_size, 2*page_size, 0);
	if (r == MAP_FAILED) {
		tst_res(TINFO, "mremap(%p, %lu, %lu, 0) disallowed",
				q, page_size, 2*page_size);
	} else {
		q_size = 2*page_size;
		if (r != q) {
			tst_res(TFAIL, "mremap() moved without MREMAP_MAYMOVE!?");
			ret = -1;
		} else
			ret = do_readback(q, 2*page_size, "normal below expanded");
	}

	SAFE_MUNMAP(p, p_size);
	SAFE_MUNMAP(q, q_size);
	if (ret)
		goto cleanup_fd;

	/* Next, normal pages above, huge below */
	tst_res(TINFO, "Testing with normal above & hpage below the slice_boundary");
	p_size = page_size;
	p = SAFE_MMAP((void *)(slice_boundary + hpage_size), p_size,
		 PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0);

	ret = do_readback(p, p_size, "normal above");
	if (ret)
		goto cleanup;

	q_size = hpage_size;
	q = SAFE_MMAP((void *)(slice_boundary - hpage_size),
		 q_size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0);

	ret = do_readback(q, q_size, "huge below");
	if (ret)
		goto cleanup;

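	/*
	 * Now the reverse: try to grow the huge page mapping in place so
	 * that it crosses into the normal-page slice.
	 */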
	r = mremap(q, hpage_size, 2*hpage_size, 0);
	if (r == MAP_FAILED) {
		tst_res(TINFO, "mremap(%p, %lu, %lu, 0) disallowed",
				q, hpage_size, 2*hpage_size);
	} else {
		q_size = 2*hpage_size;
		if (r != q) {
			tst_res(TFAIL, "mremap() moved without MREMAP_MAYMOVE!?");
			ret = -1;
		} else
			ret = do_readback(q, 2*hpage_size, "huge below expanded");
	}
	if (ret)
		goto cleanup;

	tst_res(TPASS, "Successful");

cleanup:
	if (p)
		SAFE_MUNMAP(p, p_size);
	if (q)
		SAFE_MUNMAP(q, q_size);
cleanup_fd:
	SAFE_CLOSE(fd);
}

static void setup(void)
{
	hpage_size = tst_get_hugepage_size();
	page_size = getpagesize();
}

static void cleanup(void)
{
	if (fd >= 0)
		SAFE_CLOSE(fd);
}

static struct tst_test test = {
	.needs_root = 1,
	.mntpoint = MNTPOINT,
	.needs_hugetlbfs = 1,
	.needs_tmpdir = 1,
	.setup = setup,
	.cleanup = cleanup,
	.test_all = run_test,
	.hugepages = {4, TST_NEEDS},
};
199