// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, this is the
 * output we should verify. Each test case checks for the presence (or absence)
 * of generated reports. Relies on the 'console' tracepoint to capture reports
 * as they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

#define KFENCE_TEST_REQUIRES(test, cond) do {				\
	if (!(cond))							\
		kunit_skip((test), "Test requires: " #cond);		\
} while (0)
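
/*
 * Example usage: test_init_on_free() below skips itself (rather than failing)
 * when init-on-free is not configured:
 *
 *	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
 */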

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
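
/*
 * For reference, a report of interest spans the two console lines matched in
 * probe_console() and report_matches() below, e.g. (offsets and pointer
 * values vary and are elided here):
 *
 *	BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0x...
 *	Out-of-bounds read at 0x...
 */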

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * This is a KFENCE report, and it is related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return str_write_read(r->is_write);
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NO_MERGE to prevent merging with existing caches.
	 * Use SLAB_ACCOUNT to allocate via memcg, if enabled.
	 */
	flags |= SLAB_NO_MERGE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
	/* We only need ->align, so there is no need to pass in the real caller. */
	enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, 0);
	return kmalloc_caches[type][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and if so, on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY,	/* KFENCE, any side. */
	ALLOCATE_LEFT,	/* KFENCE, left side of page. */
	ALLOCATE_RIGHT,	/* KFENCE, right side of page. */
	ALLOCATE_NONE,	/* No KFENCE allocation. */
};
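
/*
 * For example, with ALLOCATE_LEFT the object starts at the page boundary (the
 * guard page is immediately to its left), which test_alloc() below verifies
 * via PAGE_ALIGNED(); ALLOCATE_RIGHT instead places the object at the end of
 * its page.
 */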

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, _RET_IP_);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[type][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && PAGE_ALIGNED(alloc))
				return alloc;
			if (policy == ALLOCATE_RIGHT && !PAGE_ALIGNED(alloc))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_use_after_free_read_nofault(struct kunit *test)
{
	const size_t size = 32;
	char *addr;
	char dst;
	int ret;

	setup_test_cache(test, size, 0, NULL);
	addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(addr);
	/* Use after free with *_nofault() */
	ret = copy_from_kernel_nofault(&dst, addr, 1);
	KUNIT_EXPECT_EQ(test, ret, -EFAULT);
	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
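/*
 * A sketch of the arithmetic, assuming PAGE_SIZE == 4096 and an alignment of
 * 8 for this size class: a right-aligned 73-byte object is placed at
 * ALIGN_DOWN(page_end - 73, 8) == page_end - 80, leaving a 7-byte gap before
 * the guard page. Then buf + size == page_end - 7 still lies within the
 * object's page (no fault), while buf + size + align == page_end + 1 falls
 * into the guard page and is reported as an OOB.
 */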
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
						     0, (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test that init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
	/* Assume it hasn't been disabled on command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	/* Skip if we think it'd take too long. */
	KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
		cond_resched();
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test that SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point the memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* The above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to the KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the test name. Set up two tests per test case: one using
 * the default allocator, and another using a custom memcache (suffix
 * '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
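
/*
 * For example, KFENCE_KUNIT_CASE(test_double_free) expands to the two entries
 *
 *	{ .run_case = test_double_free, .name = "test_double_free" },
 *	{ .run_case = test_double_free, .name = "test_double_free-memcache" }
 *
 * so the same function runs once against kmalloc and once against a dedicated
 * memcache, which test_init() selects based on the name suffix.
 */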

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_use_after_free_read_nofault),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static int kfence_suite_init(struct kunit_suite *suite)
{
	register_trace_console(probe_console, NULL);
	return 0;
}

static void kfence_suite_exit(struct kunit_suite *suite)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kfence_suite_init,
	.suite_exit = kfence_suite_exit,
};

kunit_test_suites(&kfence_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");
MODULE_DESCRIPTION("kfence unit test suite");