Lines matching the tokens "page", "-", and "based"
1 // SPDX-License-Identifier: GPL-2.0-only
30 #include <asm/page.h>
40 /* Fields set based on lines observed in the console. */
66 return -1; in kasan_suite_init()
73 * Temporarily enable multi-shot mode. Otherwise, KASAN would only in kasan_suite_init()
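For context, a minimal sketch of the multi-shot handling this comment refers to, using the kasan_save_enable_multi_shot()/kasan_restore_multi_shot() helpers from <linux/kasan.h>; the suite hook names here are illustrative, not copied from the file:

static bool multishot;

static int example_suite_init(struct kunit_suite *suite)
{
	/* Report every bug the tests trigger, not just the first one. */
	multishot = kasan_save_enable_multi_shot();
	return 0;
}

static void example_suite_exit(struct kunit_suite *suite)
{
	/* Restore the original single-shot behavior once the suite is done. */
	kasan_restore_multi_shot(multishot);
}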
97 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
103 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
104 * checking is auto-disabled. When this happens, this test handler reenables
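A hedged sketch of how KUNIT_EXPECT_KASAN_FAIL() is used by the cases below; the function name is illustrative, and the access offset follows the "first out-of-bounds granule" pattern the file itself describes:

static void example_kmalloc_oob(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	/*
	 * An aligned access into the first out-of-bounds granule: the
	 * expression must produce a KASAN report, or the KUnit case fails.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	kfree(ptr);
}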
166 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in kmalloc_oob_right()
180 * An aligned access into the first out-of-bounds granule that falls in kmalloc_oob_right()
185 /* Out-of-bounds access past the aligned kmalloc object. */ in kmalloc_oob_right()
201 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); in kmalloc_oob_left()
221 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_track_caller_oob_right()
224 * Check that KASAN detects out-of-bounds access for object allocated via in kmalloc_track_caller_oob_right()
236 * Check that KASAN detects out-of-bounds access for object allocated via in kmalloc_track_caller_oob_right()
249 * Check that KASAN detects an out-of-bounds access for a big object allocated
255 size_t size = KMALLOC_MAX_CACHE_SIZE - 256; in kmalloc_big_oob_right()
311 struct page *pages; in page_alloc_oob_right()
316 * With generic KASAN page allocations have no redzones, thus in page_alloc_oob_right()
317 * out-of-bounds detection is not guaranteed. in page_alloc_oob_right()
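A minimal sketch of how such a test guards against this, assuming the KASAN_TEST_NEEDS_CONFIG_OFF() helper used throughout the file; names and the order value are illustrative:

static void example_page_alloc_oob(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	unsigned int order = 4;
	size_t size = PAGE_SIZE << order;

	/* Generic KASAN adds no redzones to page allocations, so skip there. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
	ptr = page_address(pages);

	/* The first byte past the allocation carries a different tag. */
	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);

	free_pages((unsigned long)ptr, order);
}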
333 struct page *pages; in page_alloc_uaf()
351 middle = size1 + (size2 - size1) / 2; in krealloc_more_oob_helper()
359 /* Suppress -Warray-bounds warnings. */ in krealloc_more_oob_helper()
363 ptr2[size1 - 1] = 'x'; in krealloc_more_oob_helper()
366 ptr2[size2 - 1] = 'x'; in krealloc_more_oob_helper()
386 middle = size2 + (size1 - size2) / 2; in krealloc_less_oob_helper()
394 /* Suppress -Warray-bounds warnings. */ in krealloc_less_oob_helper()
398 ptr2[size2 - 1] = 'x'; in krealloc_less_oob_helper()
417 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x'); in krealloc_less_oob_helper()
446 * Check that krealloc() detects a use-after-free, returns NULL,
476 ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0); in kmalloc_oob_16()
517 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_2()
528 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size)); in kmalloc_oob_memset_2()
535 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_4()
546 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size)); in kmalloc_oob_memset_4()
553 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_8()
564 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size)); in kmalloc_oob_memset_8()
571 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_16()
582 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size)); in kmalloc_oob_memset_16()
589 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_in_memset()
607 size_t invalid_size = -2; in kmalloc_memmove_negative_size()
612 * Hardware tag-based mode doesn't check memmove for negative size. in kmalloc_memmove_negative_size()
613 * As a result, this test introduces a side-effect memory corruption, in kmalloc_memmove_negative_size()
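A hedged sketch of the scenario this comment describes: the test is skipped under hardware tag-based KASAN, and the other modes are expected to report the bogus memmove() size. The function name is illustrative:

static void example_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	/*
	 * Hardware tag-based KASAN does not check memmove() with a negative
	 * (huge once converted to size_t) size, so running this there would
	 * corrupt memory instead of producing a report.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset(ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memmove(ptr, ptr + 4, invalid_size));

	kfree(ptr);
}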
696 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same. in kmalloc_uaf2()
711 * Check that KASAN detects use-after-free when another object was allocated in
712 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
719 /* This test is specifically crafted for tag-based modes. */ in kmalloc_uaf3()
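A hedged sketch of the same-slot scenario described above; the second allocation is likely (not guaranteed) to reuse the slot of the first, and the function name is illustrative:

static void example_uaf_same_slot(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* Only the tag-based modes, which do not use quarantine, are targeted. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	/* Without quarantine, this allocation tends to land in the freed slot. */
	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	/* The stale pointer still carries the old tag, so the access is reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}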
827 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in ksize_unpoisons_memory()
840 ptr[size - 1] = 'x'; in ksize_unpoisons_memory()
846 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]); in ksize_unpoisons_memory()
852 * Check that a use-after-free is detected by ksize() and via normal accesses
858 int size = 128 - KASAN_GRANULE_SIZE; in ksize_uaf()
889 ((volatile struct kasan_rcu_info *)fp)->i; in rcu_uaf_reclaim()
903 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim); in rcu_uaf()
928 ((volatile struct work_struct *)work)->data); in workqueue_uaf()
935 struct page *page; in kfree_via_page() local
941 page = virt_to_page(ptr); in kfree_via_page()
943 kfree(page_address(page) + offset); in kfree_via_page()
1053 /* Free the object - this will internally schedule an RCU callback. */ in kmem_cache_rcu_uaf()
1058 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side in kmem_cache_rcu_uaf()
1135 p[i][0] = p[i][size - 1] = 42; in kmem_cache_bulk()
1154 * list when the tests trigger double-free and invalid-free bugs. in mempool_prepare_kmalloc()
1177 * Do not allocate one preallocated element, as we skip the double-free in mempool_prepare_slab()
1178 * and invalid-free tests for slab mempool for simplicity. in mempool_prepare_slab()
1222 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in mempool_kmalloc_oob_right()
1262 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1263 * allocations have no redzones, and thus the out-of-bounds detection is not
1265 * the tag-based KASAN modes, the neighboring allocation might have the same
1269 static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page) in mempool_uaf_helper() argument
1278 ptr = page ? page_address((struct page *)elem) : elem; in mempool_uaf_helper()
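A hedged sketch of how such a helper is typically wired up for a kmalloc-backed pool, reusing the mempool_prepare_kmalloc() helper whose fragments appear above; the test name and the prepare helper's exact signature are assumptions:

static void example_mempool_kmalloc_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	void *extra_elem;

	/* Set up a kmalloc-backed pool via the helper shown above. */
	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	/* false: this pool hands out kmalloc objects, not struct page elements. */
	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}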
1433 * Skip the invalid-free test for page mempool. The invalid-free detection only
1434 * works for compound pages and mempool preallocates all page elements without
1443 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS in kasan_global_oob_right()
1466 char *p = array - 3; in kasan_global_oob_left()
1495 char *p = array - 1; in kasan_alloca_oob_left()
1649 * below accesses are still out-of-bounds, since bitops are defined to in kasan_bitops_generic()
1666 /* This test is specifically crafted for tag-based modes. */ in kasan_bitops_tags()
1669 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */ in kasan_bitops_tags()
1684 /* This test is intended for tag-based modes. */ in vmalloc_helpers_tags()
1721 struct page *page; in vmalloc_oob() local
1722 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5; in vmalloc_oob()
1735 * We have to be careful not to hit the guard page in vmalloc tests. in vmalloc_oob()
1739 /* Make sure in-bounds accesses are valid. */ in vmalloc_oob()
1741 v_ptr[size - 1] = 0; in vmalloc_oob()
1750 /* An aligned access into the first out-of-bounds granule. */ in vmalloc_oob()
1753 /* Check that in-bounds accesses to the physical page are valid. */ in vmalloc_oob()
1754 page = vmalloc_to_page(v_ptr); in vmalloc_oob()
1755 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vmalloc_oob()
1756 p_ptr = page_address(page); in vmalloc_oob()
1763 * We can't check for use-after-unmap bugs in this nor in the following in vmalloc_oob()
1764 * vmalloc tests, as the page might be fully unmapped and accessing it in vmalloc_oob()
1772 struct page *p_page, *v_page; in vmap_tags()
1775 * This test is specifically crafted for the software tag-based mode, in vmap_tags()
1776 * the only tag-based mode that poisons vmap mappings. in vmap_tags()
1794 * We can't check for out-of-bounds bugs in this nor in the following in vmap_tags()
1795 * vmalloc tests, as allocations have page granularity and accessing in vmap_tags()
1796 * the guard page will crash the kernel. in vmap_tags()
1802 /* Make sure that in-bounds accesses through both pointers work. */ in vmap_tags()
1806 /* Make sure vmalloc_to_page() correctly recovers the page pointer. */ in vmap_tags()
1818 struct page *page; in vm_map_ram_tags() local
1821 * This test is specifically crafted for the software tag-based mode, in vm_map_ram_tags()
1822 * the only tag-based mode that poisons vm_map_ram mappings. in vm_map_ram_tags()
1826 page = alloc_pages(GFP_KERNEL, 1); in vm_map_ram_tags()
1827 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vm_map_ram_tags()
1828 p_ptr = page_address(page); in vm_map_ram_tags()
1831 v_ptr = vm_map_ram(&page, 1, -1); in vm_map_ram_tags()
1837 /* Make sure that in-bounds accesses through both pointers work. */ in vm_map_ram_tags()
1847 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1853 struct page *pages; in match_all_not_assigned()
1890 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1916 /* Check that there are no match-all memory tags for tag-based modes. */
1931 * For Software Tag-Based KASAN, skip the majority of tag in match_all_mem_tag()
1935 tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8) in match_all_mem_tag()
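A hedged sketch of the tag loop this skip condition sits in; get_tag() and kasan_poison() are internal helpers from mm/kasan/kasan.h, and the body here is a sketch of the idea rather than the file's exact code:

static void example_match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	/* Memory tags only exist in the tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Under software tag-based KASAN, sample only a few tags to keep runtime low. */
		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
			continue;

		/* Retag the first granule so it no longer matches the pointer tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
		KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr);
	}

	/* Restore the matching tag before freeing. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}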
1954 * Check that Rust performing a use-after-free using `unsafe` is detected.
1975 ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL); in copy_to_kernel_nofault_oob()
2003 size_t size = 128 - KASAN_GRANULE_SIZE; in copy_user_test_oob()
2035 * bailing out on '\0' before it reaches out-of-bounds. in copy_user_test_oob()