// SPDX-License-Identifier: GPL-2.0-or-later

#define _GNU_SOURCE
#include "../kselftest_harness.h"
#include <asm-generic/mman.h> /* Force the import of the tools version. */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <sys/wait.h> /* waitpid(), used by the fork tests. */
#include <unistd.h>

/*
 * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
 *
 * "If the signal occurs other than as the result of calling the abort or raise
 * function, the behavior is undefined if the signal handler refers to any
 * object with static storage duration other than by assigning a value to an
 * object declared as volatile sig_atomic_t"
 */
static volatile sig_atomic_t signal_jump_set;
static sigjmp_buf signal_jmp_buf;

/*
 * Ignore the checkpatch warning, we must read from x but don't want to do
 * anything with it in order to trigger a read page fault. We therefore must use
 * volatile to stop the compiler from optimising this away.
 */
#define FORCE_READ(x) (*(volatile typeof(x) *)x)

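/*
 * Thin wrappers around raw syscalls: libc may not expose userfaultfd(),
 * pidfd_open() or process_madvise() directly, so invoke them via syscall(2).
 */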
static int userfaultfd(int flags)
{
	return syscall(SYS_userfaultfd, flags);
}

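/*
 * SIGSEGV handler: if a guarded-access probe is in progress, jump back to the
 * sigsetjmp() point in try_access_buf() with a non-zero value.
 */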
static void handle_fatal(int c)
{
	if (!signal_jump_set)
		return;

	siglongjmp(signal_jmp_buf, c);
}

static int pidfd_open(pid_t pid, unsigned int flags)
{
	return syscall(SYS_pidfd_open, pid, flags);
}

static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
				   size_t n, int advice, unsigned int flags)
{
	return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
}

/*
 * Enable our signal catcher and try to read/write the specified buffer. The
 * return value indicates whether the read/write succeeds without a fatal
 * signal.
 */
static bool try_access_buf(char *ptr, bool write)
{
	bool failed;

	/* Tell signal handler to jump back here on fatal signal. */
	signal_jump_set = true;
	/* If a fatal signal arose, we will jump back here and failed is set. */
	failed = sigsetjmp(signal_jmp_buf, 0) != 0;

	if (!failed) {
		if (write)
			*ptr = 'x';
		else
			FORCE_READ(ptr);
	}

	signal_jump_set = false;
	return !failed;
}

/* Try and read from a buffer, return true if no fatal signal. */
static bool try_read_buf(char *ptr)
{
	return try_access_buf(ptr, false);
}

/* Try and write to a buffer, return true if no fatal signal. */
static bool try_write_buf(char *ptr)
{
	return try_access_buf(ptr, true);
}

/*
 * Try and BOTH read from AND write to a buffer, return true if BOTH operations
 * succeed.
 */
static bool try_read_write_buf(char *ptr)
{
	return try_read_buf(ptr) && try_write_buf(ptr);
}

FIXTURE(guard_pages)
{
	unsigned long page_size;
};

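/*
 * Each test installs a SIGSEGV handler for its duration. SA_NODEFER is needed
 * because the handler exits via siglongjmp() rather than returning, which
 * would otherwise leave SIGSEGV blocked for any subsequent guard-page fault.
 */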
FIXTURE_SETUP(guard_pages)
{
	struct sigaction act = {
		.sa_handler = &handle_fatal,
		.sa_flags = SA_NODEFER,
	};

	sigemptyset(&act.sa_mask);
	if (sigaction(SIGSEGV, &act, NULL))
		ksft_exit_fail_perror("sigaction");

	self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
};

FIXTURE_TEARDOWN(guard_pages)
{
	struct sigaction act = {
		.sa_handler = SIG_DFL,
		.sa_flags = SA_NODEFER,
	};

	sigemptyset(&act.sa_mask);
	sigaction(SIGSEGV, &act, NULL);
}

TEST_F(guard_pages, basic)
{
	const unsigned long NUM_PAGES = 10;
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	ptr = mmap(NULL, NUM_PAGES * page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANON, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Trivially assert we can touch the first page. */
	ASSERT_TRUE(try_read_write_buf(ptr));

	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);

	/* Establish that 1st page SIGSEGV's. */
	ASSERT_FALSE(try_read_write_buf(ptr));

	/* Ensure we can touch everything else. */
	for (i = 1; i < NUM_PAGES; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Establish a guard page at the end of the mapping. */
	ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
			  MADV_GUARD_INSTALL), 0);

	/* Check that both guard pages result in SIGSEGV. */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));

	/* Remove the first guard page. */
	ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));

	/* Make sure we can touch it. */
	ASSERT_TRUE(try_read_write_buf(ptr));

	/* Remove the last guard page. */
	ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
			     MADV_GUARD_REMOVE));

	/* Make sure we can touch it. */
	ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));

	/*
	 * Test setting a _range_ of pages, namely the first 3. The first of
	 * these is already faulted in, so this also tests that we can install
	 * guard pages over backed pages.
	 */
	ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure they are all guard pages. */
	for (i = 0; i < 3; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Make sure the rest are not. */
	for (i = 3; i < NUM_PAGES; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Remove guard pages. */
	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);

	/* Now make sure we can touch everything. */
	for (i = 0; i < NUM_PAGES; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/*
	 * Now remove all guard pages, make sure we don't remove existing
	 * entries.
	 */
	ASSERT_EQ(madvise(ptr, NUM_PAGES * page_size, MADV_GUARD_REMOVE), 0);

	for (i = 0; i < NUM_PAGES * page_size; i += page_size) {
		char chr = ptr[i];

		ASSERT_EQ(chr, 'x');
	}

	ASSERT_EQ(munmap(ptr, NUM_PAGES * page_size), 0);
}

/* Assert that operations applied across multiple VMAs work as expected. */
TEST_F(guard_pages, multi_vma)
{
	const unsigned long page_size = self->page_size;
	char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
	int i;

	/* Reserve a 100 page region over which we can install VMAs. */
	ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
			  MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_region, MAP_FAILED);

	/* Place a VMA of 10 pages size at the start of the region. */
	ptr1 = mmap(ptr_region, 10 * page_size, PROT_READ | PROT_WRITE,
		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr1, MAP_FAILED);

	/* Place a VMA of 5 pages size 50 pages into the region. */
	ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);

	/* Place a VMA of 20 pages size at the end of the region. */
	ptr3 = mmap(&ptr_region[80 * page_size], 20 * page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);

	/* Unmap gaps. */
	ASSERT_EQ(munmap(&ptr_region[10 * page_size], 40 * page_size), 0);
	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 25 * page_size), 0);

	/*
	 * We end up with VMAs like this:
	 *
	 * 0 10 .. 50 55 .. 80 100
	 * [---] [---] [---]
	 */

	/*
	 * Now mark the whole range as guard pages and make sure all VMAs are as
	 * such.
	 */

	/*
	 * madvise() lets you perform operations over ranges containing gaps:
	 * the advice is applied to whatever is mapped, but the call still
	 * returns -1 with errno set to ENOMEM. Unfortunately ENOMEM is also
	 * used for genuine out-of-memory failures, so the two cases cannot be
	 * told apart from the return value alone.
	 */
	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
	ASSERT_EQ(errno, ENOMEM);

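	/* The markers are nonetheless applied to every VMA that does exist. */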
	for (i = 0; i < 10; i++) {
		char *curr = &ptr1[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	for (i = 0; i < 5; i++) {
		char *curr = &ptr2[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	for (i = 0; i < 20; i++) {
		char *curr = &ptr3[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Now remove guard pages over the range and assert the opposite. */

	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), -1);
	ASSERT_EQ(errno, ENOMEM);

	for (i = 0; i < 10; i++) {
		char *curr = &ptr1[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	for (i = 0; i < 5; i++) {
		char *curr = &ptr2[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	for (i = 0; i < 20; i++) {
		char *curr = &ptr3[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Now map incompatible VMAs in the gaps. */
	ptr = mmap(&ptr_region[10 * page_size], 40 * page_size,
		   PROT_READ | PROT_WRITE | PROT_EXEC,
		   MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	ptr = mmap(&ptr_region[55 * page_size], 25 * page_size,
		   PROT_READ | PROT_WRITE | PROT_EXEC,
		   MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * We end up with VMAs like this:
	 *
	 * 0 10 .. 50 55 .. 80 100
	 * [---][xxxx][---][xxxx][---]
	 *
	 * Where 'x' signifies VMAs that cannot be merged with those adjacent to
	 * them.
	 */

	/* Multiple VMAs adjacent to one another should result in no error. */
	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0);
	for (i = 0; i < 100; i++) {
		char *curr = &ptr_region[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}
	ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0);
	for (i = 0; i < 100; i++) {
		char *curr = &ptr_region[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr_region, 100 * page_size), 0);
}

/*
 * Assert that batched operations performed using process_madvise() work as
 * expected.
 */
TEST_F(guard_pages, process_madvise)
{
	const unsigned long page_size = self->page_size;
	pid_t pid = getpid();
	int pidfd = pidfd_open(pid, 0);
	char *ptr_region, *ptr1, *ptr2, *ptr3;
	ssize_t count;
	struct iovec vec[6];

	ASSERT_NE(pidfd, -1);

	/* Reserve region to map over. */
	ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
			  MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_region, MAP_FAILED);

	/*
	 * 10 pages offset 1 page into reserve region. We MAP_POPULATE so the
	 * pages are faulted in up front, which lets us test installing guard
	 * markers over existing page table entries.
	 */
	ptr1 = mmap(&ptr_region[page_size], 10 * page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_FIXED | MAP_ANON | MAP_PRIVATE | MAP_POPULATE, -1, 0);
	ASSERT_NE(ptr1, MAP_FAILED);
	/* We want guard markers at start/end of each VMA. */
	vec[0].iov_base = ptr1;
	vec[0].iov_len = page_size;
	vec[1].iov_base = &ptr1[9 * page_size];
	vec[1].iov_len = page_size;

	/* 5 pages offset 50 pages into reserve region. */
	ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr2, MAP_FAILED);
	vec[2].iov_base = ptr2;
	vec[2].iov_len = page_size;
	vec[3].iov_base = &ptr2[4 * page_size];
	vec[3].iov_len = page_size;

	/* 20 pages offset 79 pages into reserve region. */
	ptr3 = mmap(&ptr_region[79 * page_size], 20 * page_size,
		    PROT_READ | PROT_WRITE,
		    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr3, MAP_FAILED);
	vec[4].iov_base = ptr3;
	vec[4].iov_len = page_size;
	vec[5].iov_base = &ptr3[19 * page_size];
	vec[5].iov_len = page_size;

	/* Free surrounding VMAs. */
	ASSERT_EQ(munmap(ptr_region, page_size), 0);
	ASSERT_EQ(munmap(&ptr_region[11 * page_size], 39 * page_size), 0);
	ASSERT_EQ(munmap(&ptr_region[55 * page_size], 24 * page_size), 0);
	ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);

	/* Now guard in one step. */
	count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0);

	/* OK we don't have permission to do this, skip. */
	if (count == -1 && errno == EPERM)
		ksft_exit_skip("No process_madvise() permissions, try running as root.\n");

	/* Returns the number of bytes advised. */
	ASSERT_EQ(count, 6 * page_size);

	/* Now make sure the guarding was applied. */

	ASSERT_FALSE(try_read_write_buf(ptr1));
	ASSERT_FALSE(try_read_write_buf(&ptr1[9 * page_size]));

	ASSERT_FALSE(try_read_write_buf(ptr2));
	ASSERT_FALSE(try_read_write_buf(&ptr2[4 * page_size]));

	ASSERT_FALSE(try_read_write_buf(ptr3));
	ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));

	/* Now do the same with unguard... */
	count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0);
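	/*
	 * The return value of the remove pass is not asserted here; the
	 * accesses below verify that the markers are gone.
	 */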

	/* ...and everything should now succeed. */

	ASSERT_TRUE(try_read_write_buf(ptr1));
	ASSERT_TRUE(try_read_write_buf(&ptr1[9 * page_size]));

	ASSERT_TRUE(try_read_write_buf(ptr2));
	ASSERT_TRUE(try_read_write_buf(&ptr2[4 * page_size]));

	ASSERT_TRUE(try_read_write_buf(ptr3));
	ASSERT_TRUE(try_read_write_buf(&ptr3[19 * page_size]));

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
	ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
	ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
	close(pidfd);
}

/* Assert that unmapping ranges does not leave guard markers behind. */
TEST_F(guard_pages, munmap)
{
	const unsigned long page_size = self->page_size;
	char *ptr, *ptr_new1, *ptr_new2;

	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard first and last pages. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_EQ(madvise(&ptr[9 * page_size], page_size, MADV_GUARD_INSTALL), 0);

	/* Assert that they are guarded. */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[9 * page_size]));

	/* Unmap them. */
	ASSERT_EQ(munmap(ptr, page_size), 0);
	ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);

	/* Map over them. */
	ptr_new1 = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_new1, MAP_FAILED);
	ptr_new2 = mmap(&ptr[9 * page_size], page_size, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_new2, MAP_FAILED);

	/* Assert that they are now not guarded. */
	ASSERT_TRUE(try_read_write_buf(ptr_new1));
	ASSERT_TRUE(try_read_write_buf(ptr_new2));

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Assert that mprotect() operations have no bearing on guard markers. */
TEST_F(guard_pages, mprotect)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard the middle of the range. */
	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
			  MADV_GUARD_INSTALL), 0);

	/* Assert that it is indeed guarded. */
	ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
	ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));

	/* Now make these pages read-only. */
	ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);

	/* Make sure the range is still guarded. */
	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));

	/* Make sure we can guard again without issue. */
	ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
			  MADV_GUARD_INSTALL), 0);

	/* Make sure the range is, yet again, still guarded. */
	ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
	ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));

	/* Now unguard the whole range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

	/* Make sure the whole range is readable. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_buf(curr));
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Split and merge VMAs and make sure guard pages still behave. */
TEST_F(guard_pages, split_merge)
{
	const unsigned long page_size = self->page_size;
	char *ptr, *ptr_new;
	int i;

	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard the whole range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure the whole range is guarded. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Now unmap some pages in the range so we split. */
	ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
	ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
	ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);

	/* Make sure the remaining ranges are guarded post-split. */
	for (i = 0; i < 2; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}
	for (i = 2; i < 5; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}
	for (i = 6; i < 8; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}
	for (i = 9; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Now map them again - the unmap will have cleared the guards. */
	ptr_new = mmap(&ptr[2 * page_size], page_size, PROT_READ | PROT_WRITE,
		       MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_new, MAP_FAILED);
	ptr_new = mmap(&ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE,
		       MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_new, MAP_FAILED);
	ptr_new = mmap(&ptr[8 * page_size], page_size, PROT_READ | PROT_WRITE,
		       MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_new, MAP_FAILED);

	/* Now make sure guard pages are established. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];
		bool result = try_read_write_buf(curr);
		bool expect_true = i == 2 || i == 5 || i == 8;

		ASSERT_TRUE(expect_true ? result : !result);
	}

	/* Now guard everything again. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure the whole range is guarded. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Now split the range into three. */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);

	/* Make sure the whole range is guarded for read. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_buf(curr));
	}

	/* Now reset protection bits so we merge the whole thing. */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
			   PROT_READ | PROT_WRITE), 0);

	/* Make sure the whole range is still guarded. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Split range into 3 again... */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ), 0);
	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size, PROT_READ), 0);

	/* ...and unguard the whole range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

	/* Make sure the whole range is remedied for read. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_buf(curr));
	}

	/* Merge them again. */
	ASSERT_EQ(mprotect(ptr, 3 * page_size, PROT_READ | PROT_WRITE), 0);
	ASSERT_EQ(mprotect(&ptr[7 * page_size], 3 * page_size,
			   PROT_READ | PROT_WRITE), 0);

	/* Now ensure the merged range is remedied for read/write. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Assert that MADV_DONTNEED does not remove guard markers. */
TEST_F(guard_pages, dontneed)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Back the whole range. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		*curr = 'y';
	}

	/* Guard every other page. */
	for (i = 0; i < 10; i += 2) {
		char *curr = &ptr[i * page_size];
		int res = madvise(curr, page_size, MADV_GUARD_INSTALL);

		ASSERT_EQ(res, 0);
	}

	/* Indicate that we don't need any of the range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);

	/* Check to ensure guard markers are still in place. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];
		bool result = try_read_buf(curr);

		if (i % 2 == 0) {
			ASSERT_FALSE(result);
		} else {
			ASSERT_TRUE(result);
			/* Make sure we really did get reset to zero page. */
			ASSERT_EQ(*curr, '\0');
		}

		/* Now write... */
		result = try_write_buf(&ptr[i * page_size]);

		/* ...and make sure same result. */
		ASSERT_TRUE(i % 2 != 0 ? result : !result);
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Assert that mlock()'ed pages work correctly with guard markers. */
TEST_F(guard_pages, mlock)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Populate. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		*curr = 'y';
	}

	/* Lock. */
	ASSERT_EQ(mlock(ptr, 10 * page_size), 0);

	/* Now try to guard, should fail with EINVAL. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
	ASSERT_EQ(errno, EINVAL);

	/* OK unlock. */
	ASSERT_EQ(munlock(ptr, 10 * page_size), 0);

	/* Guard first half of range, should now succeed. */
	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure guard works. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];
		bool result = try_read_write_buf(curr);

		if (i < 5) {
			ASSERT_FALSE(result);
		} else {
			ASSERT_TRUE(result);
			ASSERT_EQ(*curr, 'x');
		}
	}

	/*
	 * Now lock the latter part of the range. We can't lock the guard pages,
	 * as this would result in the pages being populated and the guarding
	 * would cause this to error out.
	 */
	ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);

	/*
	 * Now remove guard pages, we permit mlock()'d ranges to have guard
	 * pages removed as it is a non-destructive operation.
	 */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

	/* Now check that no guard pages remain. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Assert that moving, extending and shrinking memory via mremap() retains
 * guard markers where possible.
 *
 * - Moving a mapping alone should retain markers as they are.
 */
TEST_F(guard_pages, mremap_move)
{
	const unsigned long page_size = self->page_size;
	char *ptr, *ptr_new;

	/* Map 5 pages. */
	ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Place guard markers at both ends of the 5 page span. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure the guard pages are in effect. */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/*
	 * Map a new region we will move this range into. Doing this ensures
	 * that we have reserved a range to map into.
	 */
	ptr_new = mmap(NULL, 5 * page_size, PROT_NONE, MAP_ANON | MAP_PRIVATE,
		       -1, 0);
	ASSERT_NE(ptr_new, MAP_FAILED);

	ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
			 MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new), ptr_new);

	/* Make sure the guard markers are retained. */
	ASSERT_FALSE(try_read_write_buf(ptr_new));
	ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));

	/*
	 * Clean up - we only need reference the new pointer as we overwrote the
	 * PROT_NONE range and moved the existing one.
	 */
	munmap(ptr_new, 5 * page_size);
}

/*
 * Assert that moving, extending and shrinking memory via mremap() retains
 * guard markers where possible.
 *
 * Expanding should retain guard pages, only now in a different position
 * relative to the end of the mapping. The user will have to remove guard
 * pages manually to fix up (they'd have to do the same if it were a
 * PROT_NONE mapping).
 */
TEST_F(guard_pages, mremap_expand)
{
	const unsigned long page_size = self->page_size;
	char *ptr, *ptr_new;

	/* Map 10 pages... */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/* ...But unmap the last 5 so we can ensure we can expand into them. */
	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);

	/* Place guard markers at both ends of the 5 page span. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure the guarding is in effect. */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/* Now expand to 10 pages. */
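	/*
	 * With flags == 0 mremap() cannot move the mapping, so this expands in
	 * place into the 5 pages unmapped above.
	 */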
	ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Make sure the guard markers are retained in their original
	 * positions.
	 */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/* Reserve a region which we can move to and expand into. */
	ptr_new = mmap(NULL, 20 * page_size, PROT_NONE,
		       MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr_new, MAP_FAILED);

	/* Now move and expand into it. */
	ptr = mremap(ptr, 10 * page_size, 20 * page_size,
		     MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
	ASSERT_EQ(ptr, ptr_new);

	/*
	 * Again, make sure the guard markers are retained in their original
	 * positions.
	 */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/*
	 * A real user would have to remove guard markers, but would reasonably
	 * expect all characteristics of the mapping to be retained, including
	 * guard markers.
	 */

	/* Cleanup. */
	munmap(ptr, 20 * page_size);
}

/*
 * Assert that moving, extending and shrinking memory via mremap() retains
 * guard markers where possible.
 *
 * Shrinking will result in markers that are shrunk over being removed. Again,
 * if the user were using a PROT_NONE mapping they'd have to manually fix this
 * up also so this is OK.
 */
TEST_F(guard_pages, mremap_shrink)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	/* Map 5 pages. */
	ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Place guard markers at both ends of the 5 page span. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure the guarding is in effect. */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/* Now shrink to 3 pages. */
	ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
	ASSERT_NE(ptr, MAP_FAILED);

	/* We expect the guard marker at the start to be retained... */
	ASSERT_FALSE(try_read_write_buf(ptr));

	/* ...But remaining pages will not have guard markers. */
	for (i = 1; i < 3; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/*
	 * As with expansion, a real user would have to remove guard pages and
	 * fixup. But you'd have to do similar manual things with PROT_NONE
	 * mappings too.
	 */

	/*
	 * If we expand back to the original size, the end marker will, of
	 * course, no longer be present.
	 */
	ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Again, we expect the guard marker at the start to be retained... */
	ASSERT_FALSE(try_read_write_buf(ptr));

	/* ...But remaining pages will not have guard markers. */
	for (i = 1; i < 5; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_TRUE(try_read_write_buf(curr));
	}

	/* Cleanup. */
	munmap(ptr, 5 * page_size);
}

/*
 * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
 * retain guard pages.
 */
TEST_F(guard_pages, fork)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	pid_t pid;
	int i;

	/* Map 10 pages. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Establish guard pages in the first 5 pages. */
	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);

	pid = fork();
	ASSERT_NE(pid, -1);
	if (!pid) {
		/* This is the child process now. */

		/* Assert that the guarding is in effect. */
		for (i = 0; i < 10; i++) {
			char *curr = &ptr[i * page_size];
			bool result = try_read_write_buf(curr);

			ASSERT_TRUE(i >= 5 ? result : !result);
		}

		/* Now unguard the range. */
		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

		exit(0);
	}

	/* Parent process. */

	/* Parent simply waits on child. */
	waitpid(pid, NULL, 0);

	/* Child unguard does not impact parent page table state. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];
		bool result = try_read_write_buf(curr);

		ASSERT_TRUE(i >= 5 ? result : !result);
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Assert expected behaviour after we fork populated ranges of anonymous memory
 * and then guard and unguard the range.
 */
TEST_F(guard_pages, fork_cow)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	pid_t pid;
	int i;

	/* Map 10 pages. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Populate range. */
	for (i = 0; i < 10 * page_size; i++) {
		char chr = 'a' + (i % 26);

		ptr[i] = chr;
	}

	pid = fork();
	ASSERT_NE(pid, -1);
	if (!pid) {
		/* This is the child process now. */

		/* Ensure the range is as expected. */
		for (i = 0; i < 10 * page_size; i++) {
			char expected = 'a' + (i % 26);
			char actual = ptr[i];

			ASSERT_EQ(actual, expected);
		}

		/* Establish guard pages across the whole range. */
		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);
		/* Remove it. */
		ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);

		/*
		 * By removing the guard pages, the page tables will be
		 * cleared. Assert that we are looking at the zero page now.
		 */
		for (i = 0; i < 10 * page_size; i++) {
			char actual = ptr[i];

			ASSERT_EQ(actual, '\0');
		}

		exit(0);
	}

	/* Parent process. */

	/* Parent simply waits on child. */
	waitpid(pid, NULL, 0);

	/* Ensure the range is unchanged in parent anon range. */
	for (i = 0; i < 10 * page_size; i++) {
		char expected = 'a' + (i % 26);
		char actual = ptr[i];

		ASSERT_EQ(actual, expected);
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/*
 * Assert that forking a process with VMAs that do have VM_WIPEONFORK set
 * behave as expected.
 */
TEST_F(guard_pages, fork_wipeonfork)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	pid_t pid;
	int i;

	/* Map 10 pages. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Mark wipe on fork. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_WIPEONFORK), 0);
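	/*
	 * The child will see this range as a fresh, empty anonymous mapping,
	 * so the guard markers installed below should not be inherited.
	 */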

	/* Guard the first 5 pages. */
	ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);

	pid = fork();
	ASSERT_NE(pid, -1);
	if (!pid) {
		/* This is the child process now. */

		/* Guard will have been wiped. */
		for (i = 0; i < 10; i++) {
			char *curr = &ptr[i * page_size];

			ASSERT_TRUE(try_read_write_buf(curr));
		}

		exit(0);
	}

	/* Parent process. */

	waitpid(pid, NULL, 0);

	/* Guard markers should be in effect. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];
		bool result = try_read_write_buf(curr);

		ASSERT_TRUE(i >= 5 ? result : !result);
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Ensure that MADV_FREE retains guard entries as expected. */
TEST_F(guard_pages, lazyfree)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	/* Map 10 pages. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

	/* Ensure guarded. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Lazyfree range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_FREE), 0);

	/* This should leave the guard markers in place. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
TEST_F(guard_pages, populate)
{
	const unsigned long page_size = self->page_size;
	char *ptr;

	/* Map 10 pages. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

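	/*
	 * MADV_POPULATE_READ/WRITE fault in the range as if it were touched,
	 * so hitting a guard marker is expected to fail with EFAULT.
	 */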
	/* Populate read should error out... */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_READ), -1);
	ASSERT_EQ(errno, EFAULT);

	/* ...as should populate write. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_POPULATE_WRITE), -1);
	ASSERT_EQ(errno, EFAULT);

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
TEST_F(guard_pages, cold_pageout)
{
	const unsigned long page_size = self->page_size;
	char *ptr;
	int i;

	/* Map 10 pages. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Guard range. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

	/* Ensure guarded. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Now mark cold. This should have no impact on guard markers. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_COLD), 0);

	/* Should remain guarded. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* OK, now page out. This should, equally, have no effect on markers. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);

	/* Should remain guarded. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Cleanup. */
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

/* Ensure that guard pages do not break userfaultfd. */
TEST_F(guard_pages, uffd)
{
	const unsigned long page_size = self->page_size;
	int uffd;
	char *ptr;
	int i;
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = 0,
	};
	struct uffdio_register reg;
	struct uffdio_range range;

	/* Set up uffd. */
	uffd = userfaultfd(0);
	if (uffd == -1 && errno == EPERM)
		ksft_exit_skip("No userfaultfd permissions, try running as root.\n");
	ASSERT_NE(uffd, -1);

	ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);

	/* Map 10 pages. */
	ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
		   MAP_ANON | MAP_PRIVATE, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Register the range with uffd. */
	range.start = (unsigned long)ptr;
	range.len = 10 * page_size;
	reg.range = range;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
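	/*
	 * With MISSING mode registered and no thread servicing the uffd, a
	 * fault on an unpopulated page would simply block. Guard markers must
	 * instead deliver SIGSEGV directly to the faulting thread.
	 */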
	ASSERT_EQ(ioctl(uffd, UFFDIO_REGISTER, &reg), 0);

	/* Guard the range. This should not trigger the uffd. */
	ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0);

	/* The guarding should behave as usual with no uffd intervention. */
	for (i = 0; i < 10; i++) {
		char *curr = &ptr[i * page_size];

		ASSERT_FALSE(try_read_write_buf(curr));
	}

	/* Cleanup. */
	ASSERT_EQ(ioctl(uffd, UFFDIO_UNREGISTER, &range), 0);
	close(uffd);
	ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}

TEST_HARNESS_MAIN