1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2016-2019 FUJITSU LIMITED. All rights reserved.
4 * Author: Naoya Horiguchi <[email protected]>
5 * Ported: Guangwen Feng <[email protected]>
6 * Ported: Xiao Yang <[email protected]>
7 * Ported: Yang Xu <[email protected]>
8 */
9
10 /*\
11 * [Description]
12 *
13 * *Test 1*
14 *
15 * This is a regression test for the race condition between move_pages()
16 * and freeing hugepages, where move_pages() calls follow_page(FOLL_GET)
17 * for hugepages internally and tries to get its refcount without
18 * preventing concurrent freeing.
19 *
20 * This test can crash the buggy kernel, and the bug was fixed in:
21 *
22 * commit e66f17ff71772b209eed39de35aaa99ba819c93d
23 * Author: Naoya Horiguchi <[email protected]>
24 * Date: Wed Feb 11 15:25:22 2015 -0800
25 *
26 * mm/hugetlb: take page table lock in follow_huge_pmd()
27 *
28 * *Test 2.1*
29 *
30 * This is a regression test for the race condition, where move_pages()
31 * and soft offline are called on a single hugetlb page concurrently.
32 *
33 * This test can crash the buggy kernel, and was fixed by:
34 *
35 * commit c9d398fa237882ea07167e23bcfc5e6847066518
36 * Author: Naoya Horiguchi <[email protected]>
37 * Date: Fri Mar 31 15:11:55 2017 -0700
38 *
39 * mm, hugetlb: use pte_present() instead of pmd_present() in follow_huge_pmd()
40 *
41 * *Test 2.2*
42 *
* This is also a regression test for a race condition causing SIGBUS
44 * in hugepage migration/fault.
45 *
46 * This bug was fixed by:
47 *
48 * commit 4643d67e8cb0b3536ef0ab5cddd1cedc73fa14ad
49 * Author: Mike Kravetz <[email protected]>
50 * Date: Tue Aug 13 15:38:00 2019 -0700
51 *
52 * hugetlbfs: fix hugetlb page migration/fault race causing SIGBUS
53 *
54 * *Test 2.3*
55 *
* The madvise() in the do_soft_offline() was also triggering cases where soft
* offline returned EIO when page migration failed, which was fixed in:
58 *
59 * commit 3f4b815a439adfb8f238335612c4b28bc10084d8
60 * Author: Oscar Salvador <[email protected]>
61 * Date: Mon Dec 14 19:11:51 2020 -0800
62 *
63 * mm,hwpoison: return -EBUSY when migration fails
64 */
65
66 #include <errno.h>
67 #include <unistd.h>
68 #include <string.h>
69 #include <stdio.h>
70 #include <sys/types.h>
71 #include <sys/wait.h>
72
73 #include "tst_test.h"
74 #include "move_pages_support.h"
75 #include "lapi/mmap.h"
76
77 #ifdef HAVE_NUMA_V2
78
79 #define LOOPS 1000
80 #define PATH_MEMINFO "/proc/meminfo"
81 #define PATH_NR_HUGEPAGES "/proc/sys/vm/nr_hugepages"
82 #define PATH_HUGEPAGES "/sys/kernel/mm/hugepages/"
83 #define TEST_NODES 2
84
/*
 * One test case: how many hugepages the parent maps, and whether soft
 * offlining is additionally raced against move_pages().
 */
static struct tcase {
	int tpages;	/* number of hugepages to map per iteration */
	int offline;	/* 1 => also call madvise(MADV_SOFT_OFFLINE) */
} tcases[] = {
	{2, 0},
	{2, 1},
};

static int pgsz, hpsz;	/* base page size (bytes); hugepage size (read in kB, converted to bytes in setup()) */
static long orig_hugepages = -1;	/* saved global pool size, -1 = untouched */
static char path_hugepages_node1[PATH_MAX];	/* per-node nr_hugepages sysfs path */
static char path_hugepages_node2[PATH_MAX];
static long orig_hugepages_node1 = -1;	/* saved per-node pool sizes, -1 = untouched */
static long orig_hugepages_node2 = -1;
static unsigned int node1, node2;	/* two allowed NUMA nodes with memory */
static void *addr;	/* hugepage area raced over by parent and child */
101
do_soft_offline(int tpgs)102 static int do_soft_offline(int tpgs)
103 {
104 if (madvise(addr, tpgs * hpsz, MADV_SOFT_OFFLINE) == -1) {
105 if (errno != EINVAL && errno != EBUSY)
106 tst_res(TFAIL | TERRNO, "madvise failed");
107 return errno;
108 }
109 return 0;
110 }
111
do_child(int tpgs)112 static void do_child(int tpgs)
113 {
114 int test_pages = tpgs * hpsz / pgsz;
115 int i, j;
116 int *nodes, *status;
117 void **pages;
118 pid_t ppid = getppid();
119
120 pages = SAFE_MALLOC(sizeof(char *) * test_pages);
121 nodes = SAFE_MALLOC(sizeof(int) * test_pages);
122 status = SAFE_MALLOC(sizeof(int) * test_pages);
123
124 for (i = 0; i < test_pages; i++)
125 pages[i] = addr + i * pgsz;
126
127 for (i = 0; ; i++) {
128 for (j = 0; j < test_pages; j++) {
129 if (i % 2 == 0)
130 nodes[j] = node1;
131 else
132 nodes[j] = node2;
133 status[j] = 0;
134 }
135
136 TEST(numa_move_pages(ppid, test_pages,
137 pages, nodes, status, MPOL_MF_MOVE_ALL));
138 if (TST_RET < 0) {
139 if (errno == ENOMEM)
140 continue;
141
142 tst_res(TFAIL | TTERRNO, "move_pages failed");
143 break;
144 }
145 }
146
147 exit(0);
148 }
149
/*
 * Runs one test case: the parent repeatedly maps, touches and unmaps a
 * hugepage area while a forked child concurrently calls move_pages() on
 * the very same region, racing migration against hugepage freeing (and,
 * for tcases[n].offline, against soft offlining).  On a buggy kernel
 * this can crash; surviving the loop counts as a pass.
 */
static void do_test(unsigned int n)
{
	int i, ret;
	void *ptr;
	pid_t cpid = -1;
	int status;

	/* Compact memory so the hugepage allocations below can succeed. */
	SAFE_FILE_PRINTF("/proc/sys/vm/compact_memory", "1");

	/*
	 * Map and immediately unmap once to learn a fixed address the
	 * kernel is willing to place the hugepage mapping at; each loop
	 * iteration below expects mmap() to return this same address.
	 */
	addr = SAFE_MMAP(NULL, tcases[n].tpages * hpsz, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	SAFE_MUNMAP(addr, tcases[n].tpages * hpsz);

	cpid = SAFE_FORK();
	if (cpid == 0)
		do_child(tcases[n].tpages);

	for (i = 0; i < LOOPS; i++) {
		ptr = mmap(NULL, tcases[n].tpages * hpsz,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		if (ptr == MAP_FAILED) {
			if (i == 0)
				tst_brk(TBROK | TERRNO, "Cannot allocate hugepage");

			if (errno == ENOMEM) {
				/* Hugepages transiently busy (e.g. being
				 * migrated); back off and retry. */
				usleep(1000);
				continue;
			}
		}

		/* The child's page array is built against addr, so the
		 * mapping must land there every time. */
		if (ptr != addr)
			tst_brk(TBROK, "Failed to mmap at desired addr");

		/* Fault the hugepages in so the child has pages to move. */
		memset(addr, 0, tcases[n].tpages * hpsz);

		if (tcases[n].offline) {
			ret = do_soft_offline(tcases[n].tpages);

			if (ret == EINVAL) {
				SAFE_KILL(cpid, SIGKILL);
				SAFE_WAITPID(cpid, &status, 0);
				SAFE_MUNMAP(addr, tcases[n].tpages * hpsz);
				tst_res(TCONF,
					"madvise() didn't support MADV_SOFT_OFFLINE");
				return;
			}
		}

		SAFE_MUNMAP(addr, tcases[n].tpages * hpsz);

		if (!tst_remaining_runtime())
			break;
	}

	/*
	 * The child loops forever, so stop it with SIGKILL.  A child
	 * reaped as killed-by-signal (not WIFEXITED) means it neither
	 * crashed on its own nor bailed out early with TFAIL/exit() —
	 * i.e. the kernel bug did not reproduce.
	 */
	SAFE_KILL(cpid, SIGKILL);
	SAFE_WAITPID(cpid, &status, 0);
	if (!WIFEXITED(status))
		tst_res(TPASS, "Bug not reproduced");
}
211
alloc_free_huge_on_node(unsigned int node,size_t size)212 static void alloc_free_huge_on_node(unsigned int node, size_t size)
213 {
214 char *mem;
215 long ret;
216 struct bitmask *bm;
217
218 tst_res(TINFO, "Allocating and freeing %zu hugepages on node %u",
219 size / hpsz, node);
220
221 mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
222 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
223 if (mem == MAP_FAILED) {
224 if (errno == ENOMEM)
225 tst_brk(TCONF, "Cannot allocate huge pages");
226
227 tst_brk(TBROK | TERRNO, "mmap(..., MAP_HUGETLB, ...) failed");
228 }
229
230 bm = numa_bitmask_alloc(numa_max_possible_node() + 1);
231 if (!bm)
232 tst_brk(TBROK | TERRNO, "numa_bitmask_alloc() failed");
233
234 numa_bitmask_setbit(bm, node);
235
236 ret = mbind(mem, size, MPOL_BIND, bm->maskp, bm->size + 1, 0);
237 if (ret) {
238 if (errno == ENOMEM)
239 tst_brk(TCONF, "Cannot mbind huge pages");
240
241 tst_brk(TBROK | TERRNO, "mbind() failed");
242 }
243
244 TEST(mlock(mem, size));
245 if (TST_RET) {
246 SAFE_MUNMAP(mem, size);
247 if (TST_ERR == ENOMEM || TST_ERR == EAGAIN)
248 tst_brk(TCONF, "Cannot lock huge pages");
249 tst_brk(TBROK | TTERRNO, "mlock failed");
250 }
251
252 numa_bitmask_free(bm);
253
254 SAFE_MUNMAP(mem, size);
255 }
256
/*
 * Checks NUMA/hugepage prerequisites, picks two nodes with memory and
 * grows the hugepage pools — per-node sysfs knobs when available, the
 * global pool otherwise.  Original pool sizes are saved so cleanup()
 * can restore them.
 */
static void setup(void)
{
	int ret;
	long memfree;

	check_config(TEST_NODES);

	if (access(PATH_HUGEPAGES, F_OK))
		tst_brk(TCONF, "Huge page not supported");

	ret = get_allowed_nodes(NH_MEMS, TEST_NODES, &node1, &node2);
	if (ret < 0)
		tst_brk(TBROK | TERRNO, "get_allowed_nodes: %d", ret);

	pgsz = (int)get_page_size();
	/* hpsz is in kB here (as /proc/meminfo reports it); it is
	 * converted to bytes further below, after the sysfs paths
	 * (which want kB) have been built. */
	SAFE_FILE_LINES_SCANF(PATH_MEMINFO, "Hugepagesize: %d", &hpsz);

	SAFE_FILE_PRINTF("/proc/sys/vm/drop_caches", "3");
	SAFE_FILE_LINES_SCANF(PATH_MEMINFO, "MemFree: %ld", &memfree);
	tst_res(TINFO, "Free RAM %ld kB", memfree);

	/* Both values are still in kB at this point. */
	if (4 * hpsz > memfree)
		tst_brk(TBROK, "Not enough free RAM");

	snprintf(path_hugepages_node1, sizeof(path_hugepages_node1),
		 "/sys/devices/system/node/node%u/hugepages/hugepages-%dkB/nr_hugepages",
		 node1, hpsz);

	snprintf(path_hugepages_node2, sizeof(path_hugepages_node2),
		 "/sys/devices/system/node/node%u/hugepages/hugepages-%dkB/nr_hugepages",
		 node2, hpsz);

	/* Reserve 4 extra hugepages on each node where the per-node knob
	 * exists, remembering the old size for cleanup(). */
	if (!access(path_hugepages_node1, F_OK)) {
		SAFE_FILE_SCANF(path_hugepages_node1,
				"%ld", &orig_hugepages_node1);
		tst_res(TINFO,
			"Increasing %dkB hugepages pool on node %u to %ld",
			hpsz, node1, orig_hugepages_node1 + 4);
		SAFE_FILE_PRINTF(path_hugepages_node1,
				 "%ld", orig_hugepages_node1 + 4);
	}

	if (!access(path_hugepages_node2, F_OK)) {
		SAFE_FILE_SCANF(path_hugepages_node2,
				"%ld", &orig_hugepages_node2);
		tst_res(TINFO,
			"Increasing %dkB hugepages pool on node %u to %ld",
			hpsz, node2, orig_hugepages_node2 + 4);
		SAFE_FILE_PRINTF(path_hugepages_node2,
				 "%ld", orig_hugepages_node2 + 4);
	}

	hpsz *= 1024;	/* kB -> bytes for the rest of the test */

	/* No per-node knob on at least one node: grow the global pool by
	 * 8 pages (4 per node) instead. */
	if (orig_hugepages_node1 == -1 || orig_hugepages_node2 == -1) {
		SAFE_FILE_SCANF(PATH_NR_HUGEPAGES, "%ld", &orig_hugepages);
		tst_res(TINFO, "Increasing global hugepages pool to %ld",
			orig_hugepages + 8);
		SAFE_FILE_PRINTF(PATH_NR_HUGEPAGES, "%ld", orig_hugepages + 8);
	}

	/* Verify that 4 hugepages can really be allocated and bound on
	 * each of the two chosen nodes before running the race. */
	alloc_free_huge_on_node(node1, 4L * hpsz);
	alloc_free_huge_on_node(node2, 4L * hpsz);
}
321
cleanup(void)322 static void cleanup(void)
323 {
324 if (orig_hugepages != -1)
325 SAFE_FILE_PRINTF(PATH_NR_HUGEPAGES, "%ld", orig_hugepages);
326
327 if (orig_hugepages_node1 != -1) {
328 SAFE_FILE_PRINTF(path_hugepages_node1,
329 "%ld", orig_hugepages_node1);
330 }
331
332 if (orig_hugepages_node2 != -1) {
333 SAFE_FILE_PRINTF(path_hugepages_node2,
334 "%ld", orig_hugepages_node2);
335 }
336 }
337
static struct tst_test test = {
	.needs_root = 1,	/* writes hugepage/VM sysctl and sysfs knobs */
	.forks_child = 1,
	.setup = setup,
	.cleanup = cleanup,
	.test = do_test,
	.tcnt = ARRAY_SIZE(tcases),
	.max_runtime = 240,
	/* Kernel commits fixing the races this test exercises. */
	.tags = (const struct tst_tag[]) {
		{"linux-git", "e66f17ff7177"},
		{"linux-git", "c9d398fa2378"},
		{"linux-git", "4643d67e8cb0"},
		{"linux-git", "3f4b815a439a"},
		{}
	}
};
354
355 #else
356 TST_TEST_TCONF(NUMA_ERROR_MSG);
357 #endif
358