1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 #include <stdio.h>
4 #include <fcntl.h>
5 #include <string.h>
6 #include <sys/mman.h>
7 #include <errno.h>
8 #include <malloc.h>
9 #include "vm_util.h"
10 #include "../kselftest.h"
11 #include <linux/types.h>
12 #include <linux/memfd.h>
13 #include <linux/userfaultfd.h>
14 #include <linux/fs.h>
15 #include <sys/ioctl.h>
16 #include <sys/stat.h>
17 #include <math.h>
18 #include <asm/unistd.h>
19 #include <pthread.h>
20 #include <sys/resource.h>
21 #include <assert.h>
22 #include <sys/ipc.h>
23 #include <sys/shm.h>
24
25 #define PAGEMAP_BITS_ALL (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \
26 PAGE_IS_FILE | PAGE_IS_PRESENT | \
27 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \
28 PAGE_IS_HUGE)
29 #define PAGEMAP_NON_WRITTEN_BITS (PAGE_IS_WPALLOWED | PAGE_IS_FILE | \
30 PAGE_IS_PRESENT | PAGE_IS_SWAPPED | \
31 PAGE_IS_PFNZERO | PAGE_IS_HUGE)
32
33 #define TEST_ITERATIONS 100
34 #define PAGEMAP "/proc/self/pagemap"
35 int pagemap_fd;
36 int uffd;
37 unsigned int page_size;
38 unsigned int hpage_size;
39 const char *progname;
40
41 #define LEN(region) ((region.end - region.start)/page_size)
42
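/*
 * Thin wrapper around the PAGEMAP_SCAN ioctl: fill a struct pm_scan_arg for
 * the range [start, start + len) and return the ioctl's result, i.e. the
 * number of page_region entries written to vec, or a negative error.
 *
 * A typical call from these tests, fetching only the written pages:
 *
 *	pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
 *		      PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
 */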
static long pagemap_ioctl(void *start, int len, void *vec, int vec_len, int flag,
44 int max_pages, long required_mask, long anyof_mask, long excluded_mask,
45 long return_mask)
46 {
47 struct pm_scan_arg arg;
48
49 arg.start = (uintptr_t)start;
50 arg.end = (uintptr_t)(start + len);
51 arg.vec = (uintptr_t)vec;
52 arg.vec_len = vec_len;
53 arg.flags = flag;
54 arg.size = sizeof(struct pm_scan_arg);
55 arg.max_pages = max_pages;
56 arg.category_mask = required_mask;
57 arg.category_anyof_mask = anyof_mask;
58 arg.category_inverted = excluded_mask;
59 arg.return_mask = return_mask;
60
61 return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
62 }
63
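/*
 * Same as pagemap_ioctl(), but additionally reports where the walk stopped
 * (arg.walk_end) through the optional walk_end pointer.
 */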
static long pagemap_ioc(void *start, int len, void *vec, int vec_len, int flag,
65 int max_pages, long required_mask, long anyof_mask, long excluded_mask,
66 long return_mask, long *walk_end)
67 {
68 struct pm_scan_arg arg;
69 int ret;
70
71 arg.start = (uintptr_t)start;
72 arg.end = (uintptr_t)(start + len);
73 arg.vec = (uintptr_t)vec;
74 arg.vec_len = vec_len;
75 arg.flags = flag;
76 arg.size = sizeof(struct pm_scan_arg);
77 arg.max_pages = max_pages;
78 arg.category_mask = required_mask;
79 arg.category_anyof_mask = anyof_mask;
80 arg.category_inverted = excluded_mask;
81 arg.return_mask = return_mask;
82
83 ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
84
85 if (walk_end)
86 *walk_end = arg.walk_end;
87
88 return ret;
89 }
90
91
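/*
 * Open a userfaultfd and negotiate the features required for asynchronous
 * write-protect tracking (WP_UNPOPULATED, WP_ASYNC and WP_HUGETLBFS_SHMEM).
 * Returns 0 on success and -1 if userfaultfd or any required feature is
 * missing, in which case the caller skips all tests.
 */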
int init_uffd(void)
93 {
94 struct uffdio_api uffdio_api;
95
96 uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
97 if (uffd == -1)
98 return uffd;
99
100 uffdio_api.api = UFFD_API;
101 uffdio_api.features = UFFD_FEATURE_WP_UNPOPULATED | UFFD_FEATURE_WP_ASYNC |
102 UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
103 if (ioctl(uffd, UFFDIO_API, &uffdio_api))
104 return -1;
105
106 if (!(uffdio_api.api & UFFDIO_REGISTER_MODE_WP) ||
107 !(uffdio_api.features & UFFD_FEATURE_WP_UNPOPULATED) ||
108 !(uffdio_api.features & UFFD_FEATURE_WP_ASYNC) ||
109 !(uffdio_api.features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM))
110 return -1;
111
112 return 0;
113 }
114
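/*
 * Register the range with userfaultfd in write-protect mode and write
 * protect it, so that subsequent writes show up as PAGE_IS_WRITTEN in
 * PAGEMAP_SCAN results.
 */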
int wp_init(void *lpBaseAddress, int dwRegionSize)
116 {
117 struct uffdio_register uffdio_register;
118 struct uffdio_writeprotect wp;
119
120 uffdio_register.range.start = (unsigned long)lpBaseAddress;
121 uffdio_register.range.len = dwRegionSize;
122 uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
123 if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
124 ksft_exit_fail_msg("ioctl(UFFDIO_REGISTER) %d %s\n", errno, strerror(errno));
125
126 if (!(uffdio_register.ioctls & UFFDIO_WRITEPROTECT))
127 ksft_exit_fail_msg("ioctl set is incorrect\n");
128
129 wp.range.start = (unsigned long)lpBaseAddress;
130 wp.range.len = dwRegionSize;
131 wp.mode = UFFDIO_WRITEPROTECT_MODE_WP;
132
133 if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
134 ksft_exit_fail_msg("ioctl(UFFDIO_WRITEPROTECT)\n");
135
136 return 0;
137 }
138
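/* Undo wp_init(): unregister the range from userfaultfd. */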
int wp_free(void *lpBaseAddress, int dwRegionSize)
140 {
141 struct uffdio_register uffdio_register;
142
143 uffdio_register.range.start = (unsigned long)lpBaseAddress;
144 uffdio_register.range.len = dwRegionSize;
145 uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
146 if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range))
147 ksft_exit_fail_msg("ioctl unregister failure\n");
148 return 0;
149 }
150
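/*
 * Reset written-state tracking on the range: PM_SCAN_WP_MATCHING write
 * protects the matching (written) pages again, so only writes made after
 * this call are reported as PAGE_IS_WRITTEN.
 */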
int wp_addr_range(void *lpBaseAddress, int dwRegionSize)
152 {
153 if (pagemap_ioctl(lpBaseAddress, dwRegionSize, NULL, 0,
154 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
155 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0)
156 ksft_exit_fail_msg("error %d %d %s\n", 1, errno, strerror(errno));
157
158 return 0;
159 }
160
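/*
 * Allocate hugetlb-backed memory, either as a SysV shared memory segment
 * (when shmid is non-NULL) or as an anonymous MAP_HUGETLB mapping.
 * Returns NULL if hugetlb pages are unavailable.
 */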
void *gethugetlb_mem(int size, int *shmid)
162 {
163 char *mem;
164
165 if (shmid) {
166 *shmid = shmget(2, size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
167 if (*shmid < 0)
168 return NULL;
169
170 mem = shmat(*shmid, 0, 0);
171 if (mem == (char *)-1) {
172 shmctl(*shmid, IPC_RMID, NULL);
173 ksft_exit_fail_msg("Shared memory attach failure\n");
174 }
175 } else {
176 mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
177 MAP_ANONYMOUS | MAP_HUGETLB | MAP_PRIVATE, -1, 0);
178 if (mem == MAP_FAILED)
179 return NULL;
180 }
181
182 return mem;
183 }
184
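/*
 * Exercise mprotect() on a userfaultfd-registered range and verify that no
 * page gets reported as written when nothing has been written to it.
 */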
int userfaultfd_tests(void)
186 {
187 int mem_size, vec_size, written, num_pages = 16;
188 char *mem, *vec;
189
190 mem_size = num_pages * page_size;
191 mem = mmap(NULL, mem_size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
192 if (mem == MAP_FAILED)
193 ksft_exit_fail_msg("error nomem\n");
194
195 wp_init(mem, mem_size);
196
197 /* Change protection of pages differently */
198 mprotect(mem, mem_size/8, PROT_READ|PROT_WRITE);
199 mprotect(mem + 1 * mem_size/8, mem_size/8, PROT_READ);
200 mprotect(mem + 2 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE);
201 mprotect(mem + 3 * mem_size/8, mem_size/8, PROT_READ);
202 mprotect(mem + 4 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE);
203 mprotect(mem + 5 * mem_size/8, mem_size/8, PROT_NONE);
204 mprotect(mem + 6 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE);
205 mprotect(mem + 7 * mem_size/8, mem_size/8, PROT_READ);
206
207 wp_addr_range(mem + (mem_size/16), mem_size - 2 * (mem_size/8));
208 wp_addr_range(mem, mem_size);
209
210 vec_size = mem_size/page_size;
211 vec = malloc(sizeof(struct page_region) * vec_size);
212
213 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
214 vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
215 if (written < 0)
216 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
217
218 ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", __func__);
219
220 wp_free(mem, mem_size);
221 munmap(mem, mem_size);
222 free(vec);
223 return 0;
224 }
225
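/* Sum the number of pages covered by the first vec_size regions of vec. */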
int get_reads(struct page_region *vec, int vec_size)
227 {
228 int i, sum = 0;
229
230 for (i = 0; i < vec_size; i++)
231 sum += LEN(vec[i]);
232
233 return sum;
234 }
235
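/*
 * Argument sanity checks plus a series of get/clear scenarios: written page
 * patterns, max_pages and vec_len limits, multiple VMAs, and walk_end
 * reporting over full and sparse written ranges.
 */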
int sanity_tests_sd(void)
237 {
238 unsigned long long mem_size, vec_size, i, total_pages = 0;
239 long ret, ret2, ret3;
240 int num_pages = 1000;
241 int total_writes, total_reads, reads, count;
242 struct page_region *vec, *vec2;
243 char *mem, *m[2];
244 long walk_end;
245
246 vec_size = num_pages/2;
247 mem_size = num_pages * page_size;
248
249 vec = malloc(sizeof(struct page_region) * vec_size);
250 if (!vec)
251 ksft_exit_fail_msg("error nomem\n");
252
253 vec2 = malloc(sizeof(struct page_region) * vec_size);
254 if (!vec2)
255 ksft_exit_fail_msg("error nomem\n");
256
257 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
258 if (mem == MAP_FAILED)
259 ksft_exit_fail_msg("error nomem\n");
260
261 wp_init(mem, mem_size);
262 wp_addr_range(mem, mem_size);
263
264 /* 1. wrong operation */
265 ksft_test_result(pagemap_ioctl(mem, 0, vec, vec_size, 0,
266 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0,
267 "%s Zero range size is valid\n", __func__);
268
269 ksft_test_result(pagemap_ioctl(mem, mem_size, NULL, vec_size, 0,
270 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) < 0,
271 "%s output buffer must be specified with size\n", __func__);
272
273 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, 0, 0,
274 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0,
275 "%s output buffer can be 0\n", __func__);
276
277 ksft_test_result(pagemap_ioctl(mem, mem_size, 0, 0, 0,
278 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0,
279 "%s output buffer can be 0\n", __func__);
280
281 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, -1,
282 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0,
283 "%s wrong flag specified\n", __func__);
284
285 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
286 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC | 0xFF,
287 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0,
288 "%s flag has extra bits specified\n", __func__);
289
290 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0,
291 0, 0, 0, 0, PAGE_IS_WRITTEN) >= 0,
292 "%s no selection mask is specified\n", __func__);
293
294 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0,
295 0, PAGE_IS_WRITTEN, PAGE_IS_WRITTEN, 0, 0) == 0,
296 "%s no return mask is specified\n", __func__);
297
298 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0,
299 0, PAGE_IS_WRITTEN, 0, 0, 0x1000) < 0,
300 "%s wrong return mask specified\n", __func__);
301
302 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
303 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
304 0, 0xFFF, PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN) < 0,
305 "%s mixture of correct and wrong flag\n", __func__);
306
307 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
308 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
309 0, 0, 0, PAGEMAP_BITS_ALL, PAGE_IS_WRITTEN) >= 0,
310 "%s PAGEMAP_BITS_ALL can be specified with PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n",
311 __func__);
312
313 /* 2. Clear area with larger vec size */
314 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
315 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
316 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
317 ksft_test_result(ret >= 0, "%s Clear area with larger vec size\n", __func__);
318
319 /* 3. Repeated pattern of written and non-written pages */
320 for (i = 0; i < mem_size; i += 2 * page_size)
321 mem[i]++;
322
323 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0,
324 0, PAGE_IS_WRITTEN);
325 if (ret < 0)
326 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
327
328 ksft_test_result((unsigned long long)ret == mem_size/(page_size * 2),
329 "%s Repeated pattern of written and non-written pages\n", __func__);
330
331 /* 4. Repeated pattern of written and non-written pages in parts */
332 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
333 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
334 num_pages/2 - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
335 if (ret < 0)
336 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
337
338 ret2 = pagemap_ioctl(mem, mem_size, vec, 2, 0, 0, PAGE_IS_WRITTEN, 0, 0,
339 PAGE_IS_WRITTEN);
340 if (ret2 < 0)
341 ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno));
342
343 ret3 = pagemap_ioctl(mem, mem_size, vec, vec_size,
344 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
345 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
346 if (ret3 < 0)
347 ksft_exit_fail_msg("error %ld %d %s\n", ret3, errno, strerror(errno));
348
349 ksft_test_result((ret + ret3) == num_pages/2 && ret2 == 2,
350 "%s Repeated pattern of written and non-written pages in parts %ld %ld %ld\n",
351 __func__, ret, ret3, ret2);
352
353 /* 5. Repeated pattern of written and non-written pages max_pages */
354 for (i = 0; i < mem_size; i += 2 * page_size)
355 mem[i]++;
356 mem[(mem_size/page_size - 1) * page_size]++;
357
358 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
359 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
360 num_pages/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
361 if (ret < 0)
362 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
363
364 ret2 = pagemap_ioctl(mem, mem_size, vec, vec_size,
365 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
366 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
367 if (ret2 < 0)
368 ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno));
369
370 ksft_test_result(ret == num_pages/2 && ret2 == 1,
371 "%s Repeated pattern of written and non-written pages max_pages\n",
372 __func__);
373
374 /* 6. only get 2 dirty pages and clear them as well */
375 vec_size = mem_size/page_size;
376 memset(mem, -1, mem_size);
377
378 /* get and clear second and third pages */
379 ret = pagemap_ioctl(mem + page_size, 2 * page_size, vec, 1,
380 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
381 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
382 if (ret < 0)
383 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
384
385 ret2 = pagemap_ioctl(mem, mem_size, vec2, vec_size, 0, 0,
386 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
387 if (ret2 < 0)
388 ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno));
389
390 ksft_test_result(ret == 1 && LEN(vec[0]) == 2 &&
391 vec[0].start == (uintptr_t)(mem + page_size) &&
392 ret2 == 2 && LEN(vec2[0]) == 1 && vec2[0].start == (uintptr_t)mem &&
393 LEN(vec2[1]) == vec_size - 3 &&
394 vec2[1].start == (uintptr_t)(mem + 3 * page_size),
395 "%s only get 2 written pages and clear them as well\n", __func__);
396
397 wp_free(mem, mem_size);
398 munmap(mem, mem_size);
399
400 /* 7. Two regions */
401 m[0] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
402 if (m[0] == MAP_FAILED)
403 ksft_exit_fail_msg("error nomem\n");
404 m[1] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
405 if (m[1] == MAP_FAILED)
406 ksft_exit_fail_msg("error nomem\n");
407
408 wp_init(m[0], mem_size);
409 wp_init(m[1], mem_size);
410 wp_addr_range(m[0], mem_size);
411 wp_addr_range(m[1], mem_size);
412
413 memset(m[0], 'a', mem_size);
414 memset(m[1], 'b', mem_size);
415
416 wp_addr_range(m[0], mem_size);
417
418 ret = pagemap_ioctl(m[1], mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0,
419 PAGE_IS_WRITTEN);
420 if (ret < 0)
421 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
422
423 ksft_test_result(ret == 1 && LEN(vec[0]) == mem_size/page_size,
424 "%s Two regions\n", __func__);
425
426 wp_free(m[0], mem_size);
427 wp_free(m[1], mem_size);
428 munmap(m[0], mem_size);
429 munmap(m[1], mem_size);
430
431 free(vec);
432 free(vec2);
433
434 	/* 8. Smaller max_pages */
435 mem_size = 1050 * page_size;
436 vec_size = mem_size/(page_size*2);
437
438 vec = malloc(sizeof(struct page_region) * vec_size);
439 if (!vec)
440 ksft_exit_fail_msg("error nomem\n");
441
442 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
443 if (mem == MAP_FAILED)
444 ksft_exit_fail_msg("error nomem\n");
445
446 wp_init(mem, mem_size);
447 wp_addr_range(mem, mem_size);
448
449 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
450 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
451 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
452 if (ret < 0)
453 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
454
455 for (i = 0; i < mem_size/page_size; i += 2)
456 mem[i * page_size]++;
457
458 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
459 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
460 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
461 if (ret < 0)
462 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
463
464 total_pages += ret;
465
466 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
467 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
468 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
469 if (ret < 0)
470 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
471
472 total_pages += ret;
473
474 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
475 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
476 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
477 if (ret < 0)
478 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
479
480 total_pages += ret;
481
482 ksft_test_result(total_pages == mem_size/(page_size*2), "%s Smaller max_pages\n", __func__);
483
484 free(vec);
485 wp_free(mem, mem_size);
486 munmap(mem, mem_size);
487 total_pages = 0;
488
489 /* 9. Smaller vec */
490 mem_size = 10000 * page_size;
491 vec_size = 50;
492
493 vec = malloc(sizeof(struct page_region) * vec_size);
494 if (!vec)
495 ksft_exit_fail_msg("error nomem\n");
496
497 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
498 if (mem == MAP_FAILED)
499 ksft_exit_fail_msg("error nomem\n");
500
501 wp_init(mem, mem_size);
502 wp_addr_range(mem, mem_size);
503
504 for (count = 0; count < TEST_ITERATIONS; count++) {
505 total_writes = total_reads = 0;
506 walk_end = (long)mem;
507
508 for (i = 0; i < mem_size; i += page_size) {
509 if (rand() % 2) {
510 mem[i]++;
511 total_writes++;
512 }
513 }
514
515 while (total_reads < total_writes) {
516 ret = pagemap_ioc((void *)walk_end, mem_size-(walk_end - (long)mem), vec,
517 vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
518 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
519 if (ret < 0)
520 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
521
522 if ((unsigned long)ret > vec_size)
523 break;
524
525 reads = get_reads(vec, ret);
526 total_reads += reads;
527 }
528
529 if (total_reads != total_writes)
530 break;
531 }
532
533 ksft_test_result(count == TEST_ITERATIONS, "Smaller vec\n");
534
535 free(vec);
536 wp_free(mem, mem_size);
537 munmap(mem, mem_size);
538
539 /* 10. Walk_end tester */
540 vec_size = 1000;
541 mem_size = vec_size * page_size;
542
543 vec = malloc(sizeof(struct page_region) * vec_size);
544 if (!vec)
545 ksft_exit_fail_msg("error nomem\n");
546
547 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
548 if (mem == MAP_FAILED)
549 ksft_exit_fail_msg("error nomem\n");
550
551 wp_init(mem, mem_size);
552 wp_addr_range(mem, mem_size);
553
554 memset(mem, 0, mem_size);
555
556 ret = pagemap_ioc(mem, 0, vec, vec_size, 0,
557 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
558 if (ret < 0)
559 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
560 ksft_test_result(ret == 0 && walk_end == (long)mem,
561 "Walk_end: Same start and end address\n");
562
563 ret = pagemap_ioc(mem, 0, vec, vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
564 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
565 if (ret < 0)
566 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
567 ksft_test_result(ret == 0 && walk_end == (long)mem,
568 "Walk_end: Same start and end with WP\n");
569
570 ret = pagemap_ioc(mem, 0, vec, 0, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
571 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
572 if (ret < 0)
573 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
574 ksft_test_result(ret == 0 && walk_end == (long)mem,
575 "Walk_end: Same start and end with 0 output buffer\n");
576
577 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
578 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
579 if (ret < 0)
580 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
581 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
582 "Walk_end: Big vec\n");
583
584 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
585 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
586 if (ret < 0)
587 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
588 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
589 "Walk_end: vec of minimum length\n");
590
591 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
592 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
593 if (ret < 0)
594 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
595 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
596 "Walk_end: Max pages specified\n");
597
598 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
599 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
600 if (ret < 0)
601 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
602 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size/2),
603 "Walk_end: Half max pages\n");
604
605 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
606 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
607 if (ret < 0)
608 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
609 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size),
610 "Walk_end: 1 max page\n");
611
612 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
613 -1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
614 if (ret < 0)
615 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
616 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
617 "Walk_end: max pages\n");
618
619 wp_addr_range(mem, mem_size);
620 for (i = 0; i < mem_size; i += 2 * page_size)
621 mem[i]++;
622
623 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
624 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
625 if (ret < 0)
626 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
627 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
628 "Walk_end sparse: Big vec\n");
629
630 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
631 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
632 if (ret < 0)
633 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
634 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2),
635 "Walk_end sparse: vec of minimum length\n");
636
637 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
638 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
639 if (ret < 0)
640 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
641 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2),
642 "Walk_end sparse: Max pages specified\n");
643
644 ret = pagemap_ioc(mem, mem_size, vec, vec_size/2, 0,
645 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
646 if (ret < 0)
647 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
648 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
649 "Walk_end sparse: Max pages specified\n");
650
651 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
652 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
653 if (ret < 0)
654 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
655 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
656 "Walk_end sparse: Max pages specified\n");
657
658 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
659 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
660 if (ret < 0)
661 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
662 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
663 			"Walk_end sparse: Half max pages\n");
664
665 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
666 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
667 if (ret < 0)
668 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
669 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2),
670 "Walk_end: 1 max page\n");
671
672 free(vec);
673 wp_free(mem, mem_size);
674 munmap(mem, mem_size);
675
676 return 0;
677 }
678
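/*
 * Common written-bit tests run against several backing types (anonymous,
 * THP, hugetlb, file-backed). When skip is set, emit matching skip results
 * so the kselftest plan count stays consistent.
 */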
int base_tests(char *prefix, char *mem, unsigned long long mem_size, int skip)
680 {
681 unsigned long long vec_size;
682 int written;
683 struct page_region *vec, *vec2;
684
685 if (skip) {
686 ksft_test_result_skip("%s all new pages must not be written (dirty)\n", prefix);
687 ksft_test_result_skip("%s all pages must be written (dirty)\n", prefix);
688 ksft_test_result_skip("%s all pages dirty other than first and the last one\n",
689 prefix);
690 ksft_test_result_skip("%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix);
691 ksft_test_result_skip("%s only middle page dirty\n", prefix);
692 ksft_test_result_skip("%s only two middle pages dirty\n", prefix);
693 return 0;
694 }
695
696 vec_size = mem_size/page_size;
697 vec = malloc(sizeof(struct page_region) * vec_size);
698 vec2 = malloc(sizeof(struct page_region) * vec_size);
699
700 	/* 1. all new pages must not be written (dirty) */
701 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
702 vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
703 if (written < 0)
704 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
705
706 ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", prefix);
707
708 /* 2. all pages must be written */
709 memset(mem, -1, mem_size);
710
711 written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0,
712 PAGE_IS_WRITTEN);
713 if (written < 0)
714 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
715
716 ksft_test_result(written == 1 && LEN(vec[0]) == mem_size/page_size,
717 "%s all pages must be written (dirty)\n", prefix);
718
719 /* 3. all pages dirty other than first and the last one */
720 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
721 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
722 if (written < 0)
723 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
724
725 memset(mem + page_size, 0, mem_size - (2 * page_size));
726
727 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
728 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
729 if (written < 0)
730 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
731
732 ksft_test_result(written == 1 && LEN(vec[0]) >= vec_size - 2 && LEN(vec[0]) <= vec_size,
733 "%s all pages dirty other than first and the last one\n", prefix);
734
735 written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0,
736 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
737 if (written < 0)
738 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
739
740 ksft_test_result(written == 0,
741 "%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix);
742
743 /* 4. only middle page dirty */
744 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
745 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
746 if (written < 0)
747 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
748
749 mem[vec_size/2 * page_size]++;
750
751 written = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN,
752 0, 0, PAGE_IS_WRITTEN);
753 if (written < 0)
754 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
755
756 ksft_test_result(written == 1 && LEN(vec[0]) >= 1,
757 "%s only middle page dirty\n", prefix);
758
759 /* 5. only two middle pages dirty and walk over only middle pages */
760 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
761 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE);
762 if (written < 0)
763 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
764
765 mem[vec_size/2 * page_size]++;
766 mem[(vec_size/2 + 1) * page_size]++;
767
768 written = pagemap_ioctl(&mem[vec_size/2 * page_size], 2 * page_size, vec, 1, 0,
769 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE);
770 if (written < 0)
771 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
772
773 ksft_test_result(written == 1 && vec[0].start == (uintptr_t)(&mem[vec_size/2 * page_size])
774 && LEN(vec[0]) == 2,
775 "%s only two middle pages dirty\n", prefix);
776
777 free(vec);
778 free(vec2);
779 return 0;
780 }
781
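/*
 * Allocate hpage_size-aligned memory and ask for transparent huge pages
 * with MADV_HUGEPAGE. Returns NULL if the madvise() fails.
 */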
void *gethugepage(int map_size)
783 {
784 int ret;
785 char *map;
786
787 map = memalign(hpage_size, map_size);
788 if (!map)
789 ksft_exit_fail_msg("memalign failed %d %s\n", errno, strerror(errno));
790
791 ret = madvise(map, map_size, MADV_HUGEPAGE);
792 if (ret)
793 return NULL;
794
795 memset(map, 0, map_size);
796
797 return map;
798 }
799
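/*
 * Written-bit tests specific to (transparent) huge pages: whole-page and
 * partial get/clear, including scans limited by max_pages and vec size.
 */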
int hpage_unit_tests(void)
801 {
802 char *map;
803 int ret, ret2;
804 size_t num_pages = 10;
805 unsigned long long map_size = hpage_size * num_pages;
806 unsigned long long vec_size = map_size/page_size;
807 struct page_region *vec, *vec2;
808
809 vec = malloc(sizeof(struct page_region) * vec_size);
810 vec2 = malloc(sizeof(struct page_region) * vec_size);
811 if (!vec || !vec2)
812 ksft_exit_fail_msg("malloc failed\n");
813
814 map = gethugepage(map_size);
815 if (map) {
816 wp_init(map, map_size);
817 wp_addr_range(map, map_size);
818
819 /* 1. all new huge page must not be written (dirty) */
820 ret = pagemap_ioctl(map, map_size, vec, vec_size,
821 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
822 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
823 if (ret < 0)
824 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
825
826 ksft_test_result(ret == 0, "%s all new huge page must not be written (dirty)\n",
827 __func__);
828
829 /* 2. all the huge page must not be written */
830 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
831 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
832 if (ret < 0)
833 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
834
835 ksft_test_result(ret == 0, "%s all the huge page must not be written\n", __func__);
836
837 /* 3. all the huge page must be written and clear dirty as well */
838 memset(map, -1, map_size);
839 ret = pagemap_ioctl(map, map_size, vec, vec_size,
840 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
841 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
842 if (ret < 0)
843 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
844
845 ksft_test_result(ret == 1 && vec[0].start == (uintptr_t)map &&
846 LEN(vec[0]) == vec_size && vec[0].categories == PAGE_IS_WRITTEN,
847 "%s all the huge page must be written and clear\n", __func__);
848
849 /* 4. only middle page written */
850 wp_free(map, map_size);
851 free(map);
852 map = gethugepage(map_size);
853 wp_init(map, map_size);
854 wp_addr_range(map, map_size);
855 map[vec_size/2 * page_size]++;
856
857 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
858 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
859 if (ret < 0)
860 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
861
862 ksft_test_result(ret == 1 && LEN(vec[0]) > 0,
863 "%s only middle page written\n", __func__);
864
865 wp_free(map, map_size);
866 free(map);
867 } else {
868 		ksft_test_result_skip("%s all new huge page must not be written (dirty)\n", __func__);
869 ksft_test_result_skip("%s all the huge page must not be written\n", __func__);
870 ksft_test_result_skip("%s all the huge page must be written and clear\n", __func__);
871 ksft_test_result_skip("%s only middle page written\n", __func__);
872 }
873
874 /* 5. clear first half of huge page */
875 map = gethugepage(map_size);
876 if (map) {
877 wp_init(map, map_size);
878 wp_addr_range(map, map_size);
879
880 memset(map, 0, map_size);
881
882 wp_addr_range(map, map_size/2);
883
884 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
885 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
886 if (ret < 0)
887 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
888
889 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 &&
890 vec[0].start == (uintptr_t)(map + map_size/2),
891 "%s clear first half of huge page\n", __func__);
892 wp_free(map, map_size);
893 free(map);
894 } else {
895 ksft_test_result_skip("%s clear first half of huge page\n", __func__);
896 }
897
898 /* 6. clear first half of huge page with limited buffer */
899 map = gethugepage(map_size);
900 if (map) {
901 wp_init(map, map_size);
902 wp_addr_range(map, map_size);
903
904 memset(map, 0, map_size);
905
906 ret = pagemap_ioctl(map, map_size, vec, vec_size,
907 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
908 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
909 if (ret < 0)
910 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
911
912 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
913 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
914 if (ret < 0)
915 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
916
917 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 &&
918 vec[0].start == (uintptr_t)(map + map_size/2),
919 "%s clear first half of huge page with limited buffer\n",
920 __func__);
921 wp_free(map, map_size);
922 free(map);
923 } else {
924 ksft_test_result_skip("%s clear first half of huge page with limited buffer\n",
925 __func__);
926 }
927
928 /* 7. clear second half of huge page */
929 map = gethugepage(map_size);
930 if (map) {
931 wp_init(map, map_size);
932 wp_addr_range(map, map_size);
933
934 memset(map, -1, map_size);
935
936 ret = pagemap_ioctl(map + map_size/2, map_size/2, vec, vec_size,
937 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, vec_size/2,
938 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
939 if (ret < 0)
940 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
941
942 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
943 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
944 if (ret < 0)
945 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
946
947 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2,
948 "%s clear second half huge page\n", __func__);
949 wp_free(map, map_size);
950 free(map);
951 } else {
952 ksft_test_result_skip("%s clear second half huge page\n", __func__);
953 }
954
955 /* 8. get half huge page */
956 map = gethugepage(map_size);
957 if (map) {
958 wp_init(map, map_size);
959 wp_addr_range(map, map_size);
960
961 memset(map, -1, map_size);
962 usleep(100);
963
964 ret = pagemap_ioctl(map, map_size, vec, 1,
965 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
966 hpage_size/(2*page_size), PAGE_IS_WRITTEN, 0, 0,
967 PAGE_IS_WRITTEN);
968 if (ret < 0)
969 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
970
971 ksft_test_result(ret == 1 && LEN(vec[0]) == hpage_size/(2*page_size),
972 "%s get half huge page\n", __func__);
973
974 ret2 = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
975 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
976 if (ret2 < 0)
977 ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno));
978
979 ksft_test_result(ret2 == 1 && LEN(vec[0]) == (map_size - hpage_size/2)/page_size,
980 "%s get half huge page\n", __func__);
981
982 wp_free(map, map_size);
983 free(map);
984 } else {
985 ksft_test_result_skip("%s get half huge page\n", __func__);
986 ksft_test_result_skip("%s get half huge page\n", __func__);
987 }
988
989 free(vec);
990 free(vec2);
991 return 0;
992 }
993
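/*
 * Scan a fixed address range that is not expected to be mapped and only
 * check that the ioctl itself succeeds.
 */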
int unmapped_region_tests(void)
995 {
996 void *start = (void *)0x10000000;
997 int written, len = 0x00040000;
998 int vec_size = len / page_size;
999 struct page_region *vec = malloc(sizeof(struct page_region) * vec_size);
1000
1001 /* 1. Get written pages */
1002 written = pagemap_ioctl(start, len, vec, vec_size, 0, 0,
1003 PAGEMAP_NON_WRITTEN_BITS, 0, 0, PAGEMAP_NON_WRITTEN_BITS);
1004 if (written < 0)
1005 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
1006
1007 ksft_test_result(written >= 0, "%s Get status of pages\n", __func__);
1008
1009 free(vec);
1010 return 0;
1011 }
1012
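/*
 * Repeatedly toggle the written bit on a single page: reset tracking, check
 * the page reads back as clean, dirty it with a write, and check it reads
 * back as written.
 */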
static void test_simple(void)
1014 {
1015 int i;
1016 char *map;
1017 struct page_region vec;
1018
1019 map = aligned_alloc(page_size, page_size);
1020 if (!map)
1021 ksft_exit_fail_msg("aligned_alloc failed\n");
1022
1023 wp_init(map, page_size);
1024 wp_addr_range(map, page_size);
1025
1026 for (i = 0 ; i < TEST_ITERATIONS; i++) {
1027 if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0,
1028 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 1) {
1029 ksft_print_msg("written bit was 1, but should be 0 (i=%d)\n", i);
1030 break;
1031 }
1032
1033 wp_addr_range(map, page_size);
1034 /* Write something to the page to get the written bit enabled on the page */
1035 map[0]++;
1036
1037 if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0,
1038 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0) {
1039 ksft_print_msg("written bit was 0, but should be 1 (i=%d)\n", i);
1040 break;
1041 }
1042
1043 wp_addr_range(map, page_size);
1044 }
1045 wp_free(map, page_size);
1046 free(map);
1047
1048 ksft_test_result(i == TEST_ITERATIONS, "Test %s\n", __func__);
1049 }
1050
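/*
 * More sanity checks: combinations of required/anyof/inverted masks, the
 * return mask, and scans of read-only and read-write file-backed mappings.
 */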
int sanity_tests(void)
1052 {
1053 unsigned long long mem_size, vec_size;
1054 int ret, fd, i, buf_size;
1055 struct page_region *vec;
1056 char *mem, *fmem;
1057 struct stat sbuf;
1058 char *tmp_buf;
1059
1060 /* 1. wrong operation */
1061 mem_size = 10 * page_size;
1062 vec_size = mem_size / page_size;
1063
1064 vec = malloc(sizeof(struct page_region) * vec_size);
1065 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1066 if (mem == MAP_FAILED || vec == MAP_FAILED)
1067 ksft_exit_fail_msg("error nomem\n");
1068
1069 wp_init(mem, mem_size);
1070 wp_addr_range(mem, mem_size);
1071
1072 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
1073 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
1074 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0,
1075 "%s WP op can be specified with !PAGE_IS_WRITTEN\n", __func__);
1076 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1077 PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0,
1078 "%s required_mask specified\n", __func__);
1079 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1080 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL) >= 0,
1081 "%s anyof_mask specified\n", __func__);
1082 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1083 0, 0, PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL) >= 0,
1084 "%s excluded_mask specified\n", __func__);
1085 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1086 PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL, 0,
1087 PAGEMAP_BITS_ALL) >= 0,
1088 "%s required_mask and anyof_mask specified\n", __func__);
1089 wp_free(mem, mem_size);
1090 munmap(mem, mem_size);
1091
1092 /* 2. Get sd and present pages with anyof_mask */
1093 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1094 if (mem == MAP_FAILED)
1095 ksft_exit_fail_msg("error nomem\n");
1096 wp_init(mem, mem_size);
1097 wp_addr_range(mem, mem_size);
1098
1099 memset(mem, 0, mem_size);
1100
1101 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1102 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL);
1103 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1104 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) ==
1105 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT),
1106 "%s Get sd and present pages with anyof_mask\n", __func__);
1107
1108 /* 3. Get sd and present pages with required_mask */
1109 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1110 PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL);
1111 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1112 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) ==
1113 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT),
1114 "%s Get all the pages with required_mask\n", __func__);
1115
1116 /* 4. Get sd and present pages with required_mask and anyof_mask */
1117 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1118 PAGE_IS_WRITTEN, PAGE_IS_PRESENT, 0, PAGEMAP_BITS_ALL);
1119 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1120 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) ==
1121 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT),
1122 "%s Get sd and present pages with required_mask and anyof_mask\n",
1123 __func__);
1124
1125 /* 5. Don't get sd pages */
1126 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1127 PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN, PAGEMAP_BITS_ALL);
1128 ksft_test_result(ret == 0, "%s Don't get sd pages\n", __func__);
1129
1130 /* 6. Don't get present pages */
1131 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1132 PAGE_IS_PRESENT, 0, PAGE_IS_PRESENT, PAGEMAP_BITS_ALL);
1133 ksft_test_result(ret == 0, "%s Don't get present pages\n", __func__);
1134
1135 wp_free(mem, mem_size);
1136 munmap(mem, mem_size);
1137
1138 /* 8. Find written present pages with return mask */
1139 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1140 if (mem == MAP_FAILED)
1141 ksft_exit_fail_msg("error nomem\n");
1142 wp_init(mem, mem_size);
1143 wp_addr_range(mem, mem_size);
1144
1145 memset(mem, 0, mem_size);
1146
1147 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
1148 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
1149 0, PAGEMAP_BITS_ALL, 0, PAGE_IS_WRITTEN);
1150 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1151 vec[0].categories == PAGE_IS_WRITTEN,
1152 "%s Find written present pages with return mask\n", __func__);
1153 wp_free(mem, mem_size);
1154 munmap(mem, mem_size);
1155
1156 /* 9. Memory mapped file */
1157 fd = open(progname, O_RDONLY);
1158 if (fd < 0)
1159 ksft_exit_fail_msg("%s Memory mapped file\n", __func__);
1160
1161 ret = stat(progname, &sbuf);
1162 if (ret < 0)
1163 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1164
1165 fmem = mmap(NULL, sbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
1166 if (fmem == MAP_FAILED)
1167 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1168
1169 tmp_buf = malloc(sbuf.st_size);
1170 memcpy(tmp_buf, fmem, sbuf.st_size);
1171
1172 ret = pagemap_ioctl(fmem, sbuf.st_size, vec, vec_size, 0, 0,
1173 0, PAGEMAP_NON_WRITTEN_BITS, 0, PAGEMAP_NON_WRITTEN_BITS);
1174
1175 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem &&
1176 LEN(vec[0]) == ceilf((float)sbuf.st_size/page_size) &&
1177 (vec[0].categories & PAGE_IS_FILE),
1178 "%s Memory mapped file\n", __func__);
1179
1180 munmap(fmem, sbuf.st_size);
1181 close(fd);
1182
1183 /* 10. Create and read/write to a memory mapped file */
1184 buf_size = page_size * 10;
1185
1186 fd = open(__FILE__".tmp2", O_RDWR | O_CREAT, 0666);
1187 if (fd < 0)
1188 ksft_exit_fail_msg("Read/write to memory: %s\n",
1189 strerror(errno));
1190
1191 for (i = 0; i < buf_size; i++)
1192 if (write(fd, "c", 1) < 0)
1193 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n");
1194
1195 fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1196 if (fmem == MAP_FAILED)
1197 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1198
1199 wp_init(fmem, buf_size);
1200 wp_addr_range(fmem, buf_size);
1201
1202 for (i = 0; i < buf_size; i++)
1203 fmem[i] = 'z';
1204
1205 msync(fmem, buf_size, MS_SYNC);
1206
1207 ret = pagemap_ioctl(fmem, buf_size, vec, vec_size, 0, 0,
1208 PAGE_IS_WRITTEN, PAGE_IS_PRESENT | PAGE_IS_SWAPPED | PAGE_IS_FILE, 0,
1209 PAGEMAP_BITS_ALL);
1210
1211 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem &&
1212 LEN(vec[0]) == (buf_size/page_size) &&
1213 (vec[0].categories & PAGE_IS_WRITTEN),
1214 "%s Read/write to memory\n", __func__);
1215
1216 wp_free(fmem, buf_size);
1217 munmap(fmem, buf_size);
1218 close(fd);
1219
1220 free(vec);
1221 return 0;
1222 }
1223
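/*
 * Verify that written-bit tracking survives a MAP_FIXED remap of one page
 * and mprotect() protection changes on the tracked pages.
 */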
int mprotect_tests(void)
1225 {
1226 int ret;
1227 char *mem, *mem2;
1228 struct page_region vec;
1229 int pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
1230
1231 if (pagemap_fd < 0) {
1232 fprintf(stderr, "open() failed\n");
1233 exit(1);
1234 }
1235
1236 /* 1. Map two pages */
1237 mem = mmap(0, 2 * page_size, PROT_READ|PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1238 if (mem == MAP_FAILED)
1239 ksft_exit_fail_msg("error nomem\n");
1240 wp_init(mem, 2 * page_size);
1241 wp_addr_range(mem, 2 * page_size);
1242
1243 /* Populate both pages. */
1244 memset(mem, 1, 2 * page_size);
1245
1246 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN,
1247 0, 0, PAGE_IS_WRITTEN);
1248 if (ret < 0)
1249 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1250
1251 ksft_test_result(ret == 1 && LEN(vec) == 2, "%s Both pages written\n", __func__);
1252
1253 /* 2. Start tracking */
1254 wp_addr_range(mem, 2 * page_size);
1255
1256 ksft_test_result(pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0,
1257 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0,
1258 "%s Both pages are not written (dirty)\n", __func__);
1259
1260 /* 3. Remap the second page */
1261 mem2 = mmap(mem + page_size, page_size, PROT_READ|PROT_WRITE,
1262 MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
1263 if (mem2 == MAP_FAILED)
1264 ksft_exit_fail_msg("error nomem\n");
1265 wp_init(mem2, page_size);
1266 wp_addr_range(mem2, page_size);
1267
1268 /* Protect + unprotect. */
1269 mprotect(mem, page_size, PROT_NONE);
1270 mprotect(mem, 2 * page_size, PROT_READ);
1271 mprotect(mem, 2 * page_size, PROT_READ|PROT_WRITE);
1272
1273 /* Modify both pages. */
1274 memset(mem, 2, 2 * page_size);
1275
1276 /* Protect + unprotect. */
1277 mprotect(mem, page_size, PROT_NONE);
1278 mprotect(mem, page_size, PROT_READ);
1279 mprotect(mem, page_size, PROT_READ|PROT_WRITE);
1280
1281 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN,
1282 0, 0, PAGE_IS_WRITTEN);
1283 if (ret < 0)
1284 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1285
1286 ksft_test_result(ret == 1 && LEN(vec) == 2,
1287 "%s Both pages written after remap and mprotect\n", __func__);
1288
1289 /* 4. Clear and make the pages written */
1290 wp_addr_range(mem, 2 * page_size);
1291
1292 memset(mem, 'A', 2 * page_size);
1293
1294 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN,
1295 0, 0, PAGE_IS_WRITTEN);
1296 if (ret < 0)
1297 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1298
1299 ksft_test_result(ret == 1 && LEN(vec) == 2,
1300 "%s Clear and make the pages written\n", __func__);
1301
1302 wp_free(mem, 2 * page_size);
1303 munmap(mem, 2 * page_size);
1304 return 0;
1305 }
1306
1307 /* transact test */
1308 static const unsigned int nthreads = 6, pages_per_thread = 32, access_per_thread = 8;
1309 static pthread_barrier_t start_barrier, end_barrier;
1310 static unsigned int extra_thread_faults;
1311 static unsigned int iter_count = 1000;
1312 static volatile int finish;
1313
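/*
 * Return how many of the first count pages at mem are written, optionally
 * write protecting them again (reset) in the same PAGEMAP_SCAN call.
 */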
static ssize_t get_dirty_pages_reset(char *mem, unsigned int count,
1315 int reset, int page_size)
1316 {
1317 struct pm_scan_arg arg = {0};
1318 struct page_region rgns[256];
1319 unsigned long long i, j;
1320 long ret;
1321 int cnt;
1322
1323 arg.size = sizeof(struct pm_scan_arg);
1324 arg.start = (uintptr_t)mem;
1325 arg.max_pages = count;
1326 arg.end = (uintptr_t)(mem + count * page_size);
1327 arg.vec = (uintptr_t)rgns;
1328 arg.vec_len = sizeof(rgns) / sizeof(*rgns);
1329 if (reset)
1330 arg.flags |= PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC;
1331 arg.category_mask = PAGE_IS_WRITTEN;
1332 arg.return_mask = PAGE_IS_WRITTEN;
1333
1334 ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
1335 if (ret < 0)
1336 ksft_exit_fail_msg("ioctl failed\n");
1337
1338 cnt = 0;
1339 for (i = 0; i < (unsigned long)ret; ++i) {
1340 if (rgns[i].categories != PAGE_IS_WRITTEN)
1341 ksft_exit_fail_msg("wrong flags\n");
1342
1343 for (j = 0; j < LEN(rgns[i]); ++j)
1344 cnt++;
1345 }
1346
1347 return cnt;
1348 }
1349
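/*
 * Worker thread: on every iteration, write to access_per_thread of its pages
 * between the two barriers and account any minor faults beyond that number.
 */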
void *thread_proc(void *mem)
1351 {
1352 int *m = mem;
1353 long curr_faults, faults;
1354 struct rusage r;
1355 unsigned int i;
1356 int ret;
1357
1358 if (getrusage(RUSAGE_THREAD, &r))
1359 ksft_exit_fail_msg("getrusage\n");
1360
1361 curr_faults = r.ru_minflt;
1362
1363 while (!finish) {
1364 ret = pthread_barrier_wait(&start_barrier);
1365 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1366 ksft_exit_fail_msg("pthread_barrier_wait\n");
1367
1368 for (i = 0; i < access_per_thread; ++i)
1369 __atomic_add_fetch(m + i * (0x1000 / sizeof(*m)), 1, __ATOMIC_SEQ_CST);
1370
1371 ret = pthread_barrier_wait(&end_barrier);
1372 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1373 ksft_exit_fail_msg("pthread_barrier_wait\n");
1374
1375 if (getrusage(RUSAGE_THREAD, &r))
1376 ksft_exit_fail_msg("getrusage\n");
1377
1378 faults = r.ru_minflt - curr_faults;
1379 if (faults < access_per_thread)
1380 			ksft_exit_fail_msg("faults < access_per_thread\n");
1381
1382 __atomic_add_fetch(&extra_thread_faults, faults - access_per_thread,
1383 __ATOMIC_SEQ_CST);
1384 curr_faults = r.ru_minflt;
1385 }
1386
1387 return NULL;
1388 }
1389
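/*
 * Race the worker threads' writes against read-reset of the written bits and
 * check that no update is lost; duplicate reports are tolerated and only
 * counted as extra pages.
 */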
static void transact_test(int page_size)
1391 {
1392 unsigned int i, count, extra_pages;
1393 unsigned int c;
1394 pthread_t th;
1395 char *mem;
1396 int ret;
1397
1398 if (pthread_barrier_init(&start_barrier, NULL, nthreads + 1))
1399 ksft_exit_fail_msg("pthread_barrier_init\n");
1400
1401 if (pthread_barrier_init(&end_barrier, NULL, nthreads + 1))
1402 ksft_exit_fail_msg("pthread_barrier_init\n");
1403
1404 mem = mmap(NULL, 0x1000 * nthreads * pages_per_thread, PROT_READ | PROT_WRITE,
1405 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
1406 if (mem == MAP_FAILED)
1407 ksft_exit_fail_msg("Error mmap %s.\n", strerror(errno));
1408
1409 wp_init(mem, 0x1000 * nthreads * pages_per_thread);
1410 wp_addr_range(mem, 0x1000 * nthreads * pages_per_thread);
1411
1412 memset(mem, 0, 0x1000 * nthreads * pages_per_thread);
1413
1414 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size);
1415 ksft_test_result(count > 0, "%s count %u\n", __func__, count);
1416 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size);
1417 ksft_test_result(count == 0, "%s count %u\n", __func__, count);
1418
1419 finish = 0;
1420 for (i = 0; i < nthreads; ++i)
1421 pthread_create(&th, NULL, thread_proc, mem + 0x1000 * i * pages_per_thread);
1422
1423 extra_pages = 0;
1424 for (i = 0; i < iter_count; ++i) {
1425 count = 0;
1426
1427 ret = pthread_barrier_wait(&start_barrier);
1428 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1429 ksft_exit_fail_msg("pthread_barrier_wait\n");
1430
1431 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1,
1432 page_size);
1433
1434 ret = pthread_barrier_wait(&end_barrier);
1435 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1436 ksft_exit_fail_msg("pthread_barrier_wait\n");
1437
1438 if (count > nthreads * access_per_thread)
1439 ksft_exit_fail_msg("Too big count %u expected %u, iter %u\n",
1440 count, nthreads * access_per_thread, i);
1441
1442 c = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size);
1443 count += c;
1444
1445 if (c > nthreads * access_per_thread) {
1446 ksft_test_result_fail(" %s count > nthreads\n", __func__);
1447 return;
1448 }
1449
1450 if (count != nthreads * access_per_thread) {
1451 			/*
1452 			 * The purpose of the test is to make sure that no page updates are lost
1453 			 * when page updates and read-resets of the soft-dirty flags happen in
1454 			 * parallel. However, the application may see the soft-dirty flag for the
1455 			 * same write on two consecutive read-resets. This seems unavoidable, as
1456 			 * the soft-dirty flag is handled in software through page faults in the
1457 			 * kernel. While updating the flags is supposed to be synchronized between
1458 			 * page fault handling and read-reset, a read-reset may happen after the
1459 			 * page fault has updated the PTE but before the application re-executes
1460 			 * the write instruction. The read-reset then sees the flag, clears write
1461 			 * access, and the application faults again for the same write.
1462 			 */
1463 if (count < nthreads * access_per_thread) {
1464 ksft_test_result_fail("Lost update, iter %u, %u vs %u.\n", i, count,
1465 nthreads * access_per_thread);
1466 return;
1467 }
1468
1469 extra_pages += count - nthreads * access_per_thread;
1470 }
1471 }
1472
1473 pthread_barrier_wait(&start_barrier);
1474 finish = 1;
1475 pthread_barrier_wait(&end_barrier);
1476
1477 ksft_test_result_pass("%s Extra pages %u (%.1lf%%), extra thread faults %u.\n", __func__,
1478 extra_pages,
1479 100.0 * extra_pages / (iter_count * nthreads * access_per_thread),
1480 extra_thread_faults);
1481 }
1482
int main(int __attribute__((unused)) argc, char *argv[])
1484 {
1485 int shmid, buf_size, fd, i, ret;
1486 unsigned long long mem_size;
1487 char *mem, *map, *fmem;
1488 struct stat sbuf;
1489
1490 progname = argv[0];
1491
1492 ksft_print_header();
1493
1494 if (init_uffd())
1495 ksft_exit_pass();
1496
1497 ksft_set_plan(115);
1498
1499 page_size = getpagesize();
1500 hpage_size = read_pmd_pagesize();
1501
1502 pagemap_fd = open(PAGEMAP, O_RDONLY);
1503 if (pagemap_fd < 0)
1504 return -EINVAL;
1505
1506 /* 1. Sanity testing */
1507 sanity_tests_sd();
1508
1509 /* 2. Normal page testing */
1510 mem_size = 10 * page_size;
1511 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1512 if (mem == MAP_FAILED)
1513 ksft_exit_fail_msg("error nomem\n");
1514 wp_init(mem, mem_size);
1515 wp_addr_range(mem, mem_size);
1516
1517 base_tests("Page testing:", mem, mem_size, 0);
1518
1519 wp_free(mem, mem_size);
1520 munmap(mem, mem_size);
1521
1522 /* 3. Large page testing */
1523 mem_size = 512 * 10 * page_size;
1524 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1525 if (mem == MAP_FAILED)
1526 ksft_exit_fail_msg("error nomem\n");
1527 wp_init(mem, mem_size);
1528 wp_addr_range(mem, mem_size);
1529
1530 base_tests("Large Page testing:", mem, mem_size, 0);
1531
1532 wp_free(mem, mem_size);
1533 munmap(mem, mem_size);
1534
1535 /* 4. Huge page testing */
1536 map = gethugepage(hpage_size);
1537 if (map) {
1538 wp_init(map, hpage_size);
1539 wp_addr_range(map, hpage_size);
1540 base_tests("Huge page testing:", map, hpage_size, 0);
1541 wp_free(map, hpage_size);
1542 free(map);
1543 } else {
1544 base_tests("Huge page testing:", NULL, 0, 1);
1545 }
1546
1547 /* 5. SHM Hugetlb page testing */
1548 mem_size = 2*1024*1024;
1549 mem = gethugetlb_mem(mem_size, &shmid);
1550 if (mem) {
1551 wp_init(mem, mem_size);
1552 wp_addr_range(mem, mem_size);
1553
1554 base_tests("Hugetlb shmem testing:", mem, mem_size, 0);
1555
1556 wp_free(mem, mem_size);
1557 shmctl(shmid, IPC_RMID, NULL);
1558 } else {
1559 base_tests("Hugetlb shmem testing:", NULL, 0, 1);
1560 }
1561
1562 /* 6. Hugetlb page testing */
1563 mem = gethugetlb_mem(mem_size, NULL);
1564 if (mem) {
1565 wp_init(mem, mem_size);
1566 wp_addr_range(mem, mem_size);
1567
1568 base_tests("Hugetlb mem testing:", mem, mem_size, 0);
1569
1570 wp_free(mem, mem_size);
1571 } else {
1572 base_tests("Hugetlb mem testing:", NULL, 0, 1);
1573 }
1574
1575 /* 7. File Hugetlb testing */
1576 mem_size = 2*1024*1024;
1577 fd = memfd_create("uffd-test", MFD_HUGETLB | MFD_NOEXEC_SEAL);
1578 if (fd < 0)
1579 ksft_exit_fail_msg("uffd-test creation failed %d %s\n", errno, strerror(errno));
1580 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1581 if (mem != MAP_FAILED) {
1582 wp_init(mem, mem_size);
1583 wp_addr_range(mem, mem_size);
1584
1585 base_tests("Hugetlb shmem testing:", mem, mem_size, 0);
1586
1587 wp_free(mem, mem_size);
1588 shmctl(shmid, IPC_RMID, NULL);
1589 } else {
1590 base_tests("Hugetlb shmem testing:", NULL, 0, 1);
1591 }
1592 close(fd);
1593
1594 /* 8. File memory testing */
1595 buf_size = page_size * 10;
1596
1597 fd = open(__FILE__".tmp0", O_RDWR | O_CREAT, 0777);
1598 if (fd < 0)
1599 ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n",
1600 strerror(errno));
1601
1602 for (i = 0; i < buf_size; i++)
1603 if (write(fd, "c", 1) < 0)
1604 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n");
1605
1606 ret = stat(__FILE__".tmp0", &sbuf);
1607 if (ret < 0)
1608 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1609
1610 fmem = mmap(NULL, sbuf.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1611 if (fmem == MAP_FAILED)
1612 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1613
1614 wp_init(fmem, sbuf.st_size);
1615 wp_addr_range(fmem, sbuf.st_size);
1616
1617 base_tests("File memory testing:", fmem, sbuf.st_size, 0);
1618
1619 wp_free(fmem, sbuf.st_size);
1620 munmap(fmem, sbuf.st_size);
1621 close(fd);
1622
1623 /* 9. File memory testing */
1624 buf_size = page_size * 10;
1625
1626 fd = memfd_create(__FILE__".tmp00", MFD_NOEXEC_SEAL);
1627 if (fd < 0)
1628 ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n",
1629 strerror(errno));
1630
1631 if (ftruncate(fd, buf_size))
1632 ksft_exit_fail_msg("Error ftruncate\n");
1633
1634 for (i = 0; i < buf_size; i++)
1635 if (write(fd, "c", 1) < 0)
1636 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n");
1637
1638 fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1639 if (fmem == MAP_FAILED)
1640 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1641
1642 wp_init(fmem, buf_size);
1643 wp_addr_range(fmem, buf_size);
1644
1645 base_tests("File anonymous memory testing:", fmem, buf_size, 0);
1646
1647 wp_free(fmem, buf_size);
1648 munmap(fmem, buf_size);
1649 close(fd);
1650
1651 /* 10. Huge page tests */
1652 hpage_unit_tests();
1653
1654 /* 11. Iterative test */
1655 test_simple();
1656
1657 /* 12. Mprotect test */
1658 mprotect_tests();
1659
1660 /* 13. Transact test */
1661 transact_test(page_size);
1662
1663 /* 14. Sanity testing */
1664 sanity_tests();
1665
1666 	/* 15. Unmapped address test */
1667 unmapped_region_tests();
1668
1669 /* 16. Userfaultfd tests */
1670 userfaultfd_tests();
1671
1672 close(pagemap_fd);
1673 ksft_exit_pass();
1674 }
1675