Lines Matching +full:re +full:- +full:initialization

1 // SPDX-License-Identifier: GPL-2.0
16 #include <linux/kcsan-checks.h>
78 *((unsigned long *)kp->arg) = num; in param_set_sample_interval()
81 return disabled_by_warn ? -EINVAL : kfence_enable_late(); in param_set_sample_interval()
120 * Per-object metadata, with one-to-one mapping of object metadata to
129 * kfence_metadata visible after initialization is successful. This prevents
151 * Assuming a range of 15%-85% unique allocations in the pool at any point in
152 time, the below parameters provide a probability of 0.02-0.33 for false
155 P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE))) ^ HNUM
161 #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1)
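
Plugging assumed defaults into the formula above (ALLOC_COVERED_HNUM = 2, and ALLOC_COVERED_SIZE = 512 as produced by the default CONFIG_KFENCE_NUM_OBJECTS = 255) reproduces the quoted 0.02-0.33 range. A minimal user-space sketch of that calculation, not kernel code:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Assumed defaults: HNUM = 2, SIZE = 512, 255 pool objects. */
	const double hnum = 2.0, size = 512.0, num_objects = 255.0;
	const double fill[] = { 0.15, 0.85 };

	for (int i = 0; i < 2; i++) {
		double traces = fill[i] * num_objects;
		double p = pow(1.0 - exp(-hnum * traces / size), hnum);

		/* Prints roughly 0.02 for 15% and 0.33 for 85%. */
		printf("%2.0f%% unique allocations -> P ~= %.2f\n", fill[i] * 100.0, p);
	}
	return 0;
}
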
230 * currently contained (non-zero count) in Counting Bloom filter.
257 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; in metadata_to_pageaddr()
260 /* The checks do not affect performance; only called from slow-paths. */ in metadata_to_pageaddr()
271 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr)) in metadata_to_pageaddr()
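
Working the offset formula above through the first two objects (layout as implied by that formula, with a 4 KiB page size assumed for the numbers):

	__kfence_pool: [guard][guard][object 0][guard][object 1][guard]...

	object 0: offset = (0 + 1) * 2 * PAGE_SIZE =  8192 (third page)
	object 1: offset = (1 + 1) * 2 * PAGE_SIZE = 16384 (fifth page)

Every object page therefore sits between two guard pages, and metadata_to_pageaddr() can recover the page address from the metadata index alone.
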
279 enum kfence_object_state state = READ_ONCE(meta->state); in kfence_obj_allocated()
293 next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track; in metadata_update_state()
295 lockdep_assert_held(&meta->lock); in metadata_update_state()
298 if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING) in metadata_update_state()
302 memcpy(track->stack_entries, stack_entries, in metadata_update_state()
309 num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1); in metadata_update_state()
311 track->num_stack_entries = num_stack_entries; in metadata_update_state()
312 track->pid = task_pid_nr(current); in metadata_update_state()
313 track->cpu = raw_smp_processor_id(); in metadata_update_state()
314 track->ts_nsec = local_clock(); /* Same source as printk timestamps. */ in metadata_update_state()
322 WRITE_ONCE(meta->state, next); in metadata_update_state()
343 raw_spin_lock_irqsave(&meta->lock, flags); in check_canary_byte()
345 raw_spin_unlock_irqrestore(&meta->lock, flags); in check_canary_byte()
352 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); in set_canary()
359 for (; addr < meta->addr; addr += sizeof(u64)) in set_canary()
362 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64)); in set_canary()
363 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) in set_canary()
370 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); in check_canary()
374 * We'll iterate over each canary byte per-side until a corrupted byte in check_canary()
383 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) { in check_canary()
393 for (; addr < meta->addr; addr++) { in check_canary()
399 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) { in check_canary()
403 for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) { in check_canary()
406 for (; addr - pageaddr < PAGE_SIZE; addr++) { in check_canary()
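
A minimal sketch of the per-byte scan described above. The pattern helper is an assumption modelled on the canary macros in mm/kfence/kfence.h (0xaa xor'd with the low address bits), and the real check_canary() additionally uses a u64 fast path for whole words, as the sizeof(u64) strides above show:

/* Assumed per-byte canary pattern (see KFENCE_CANARY_PATTERN_U8()). */
static inline unsigned char assumed_canary(unsigned long addr)
{
	return 0xaa ^ (addr & 0x7);
}

/* Return the first corrupted canary byte in [start, end), or 0 if intact. */
static unsigned long first_corrupted_canary(unsigned long start, unsigned long end)
{
	for (unsigned long addr = start; addr < end; addr++) {
		if (*(const unsigned char *)addr != assumed_canary(addr))
			return addr;
	}
	return 0;
}
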
430 list_del_init(&meta->list); in kfence_guarded_alloc()
438 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { in kfence_guarded_alloc()
440 * This is extremely unlikely -- we are reporting on a in kfence_guarded_alloc()
441 * use-after-free, which locked meta->lock, and the reporting in kfence_guarded_alloc()
443 * kfence_alloc() and tries to grab the same object that we're in kfence_guarded_alloc()
450 list_add_tail(&meta->list, &kfence_freelist); in kfence_guarded_alloc()
456 meta->addr = metadata_to_pageaddr(meta); in kfence_guarded_alloc()
457 /* Unprotect if we're reusing this page. */ in kfence_guarded_alloc()
458 if (meta->state == KFENCE_OBJECT_FREED) in kfence_guarded_alloc()
459 kfence_unprotect(meta->addr); in kfence_guarded_alloc()
462 * Note: for allocations made before RNG initialization, will always in kfence_guarded_alloc()
466 * is that the out-of-bounds accesses detected are deterministic for in kfence_guarded_alloc()
470 /* Allocate on the "right" side, re-calculate address. */ in kfence_guarded_alloc()
471 meta->addr += PAGE_SIZE - size; in kfence_guarded_alloc()
472 meta->addr = ALIGN_DOWN(meta->addr, cache->align); in kfence_guarded_alloc()
475 addr = (void *)meta->addr; in kfence_guarded_alloc()
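
Worked example of the right-side placement above (numbers assumed for illustration): with PAGE_SIZE = 4096, size = 100 and cache->align = 8, meta->addr moves from the page base to base + 3996 and ALIGN_DOWN() rounds it to base + 3992, so the object ends 4 bytes short of the guard page and those trailing bytes are filled with canaries instead.
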
480 WRITE_ONCE(meta->cache, cache); in kfence_guarded_alloc()
481 meta->size = size; in kfence_guarded_alloc()
482 meta->alloc_stack_hash = alloc_stack_hash; in kfence_guarded_alloc()
483 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_alloc()
488 slab = virt_to_slab((void *)meta->addr); in kfence_guarded_alloc()
489 slab->slab_cache = cache; in kfence_guarded_alloc()
490 slab->objects = 1; in kfence_guarded_alloc()
492 /* Memory initialization. */ in kfence_guarded_alloc()
497 * SL*B do the initialization, as otherwise we might overwrite KFENCE's in kfence_guarded_alloc()
502 if (cache->ctor) in kfence_guarded_alloc()
503 cache->ctor(addr); in kfence_guarded_alloc()
506 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ in kfence_guarded_alloc()
520 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_guarded_free()
522 if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) { in kfence_guarded_free()
523 /* Invalid or double-free, bail out. */ in kfence_guarded_free()
527 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_free()
531 /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ in kfence_guarded_free()
540 if (meta->unprotected_page) { in kfence_guarded_free()
541 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); in kfence_guarded_free()
542 kfence_protect(meta->unprotected_page); in kfence_guarded_free()
543 meta->unprotected_page = 0; in kfence_guarded_free()
548 init = slab_want_init_on_free(meta->cache); in kfence_guarded_free()
549 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_free()
551 alloc_covered_add(meta->alloc_stack_hash, -1); in kfence_guarded_free()
557 * Clear memory if init-on-free is set. While we protect the page, the in kfence_guarded_free()
558 * data is still there, and after a use-after-free is detected, we in kfence_guarded_free()
562 memzero_explicit(addr, meta->size); in kfence_guarded_free()
564 /* Protect to detect use-after-frees. */ in kfence_guarded_free()
571 KFENCE_WARN_ON(!list_empty(&meta->list)); in kfence_guarded_free()
572 list_add_tail(&meta->list, &kfence_freelist); in kfence_guarded_free()
587 kfence_guarded_free((void *)meta->addr, meta, false); in rcu_guarded_free()
591 * Initialization of the KFENCE pool after its allocation.
593 * which partial initialization succeeded.
612 * fast-path in SLUB, and therefore need to ensure kfree() correctly in kfence_init_pool()
613 * enters __slab_free() slow-path. in kfence_init_pool()
623 slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts | in kfence_init_pool()
645 INIT_LIST_HEAD(&meta->list); in kfence_init_pool()
646 raw_spin_lock_init(&meta->lock); in kfence_init_pool()
647 meta->state = KFENCE_OBJECT_UNUSED; in kfence_init_pool()
648 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ in kfence_init_pool()
649 list_add_tail(&meta->list, &kfence_freelist); in kfence_init_pool()
659 * Make kfence_metadata visible only when initialization is successful. in kfence_init_pool()
660 * Otherwise, if the initialization fails and kfence_metadata is freed, in kfence_init_pool()
673 slab->obj_exts = 0; in kfence_init_pool()
695 * are registered with kmemleak through the slab post-alloc hook. in kfence_init_pool_early()
708 memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); in kfence_init_pool_early()
757 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; in show_object()
760 raw_spin_lock_irqsave(&meta->lock, flags); in show_object()
762 raw_spin_unlock_irqrestore(&meta->lock, flags); in show_object()
763 seq_puts(seq, "---------------------------------\n"); in show_object()
821 /* Wait queue to wake up allocation-gate timer task. */
847 atomic_set(&kfence_allocation_gate, -kfence_burst); in toggle_allocation_gate()
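
A short worked example of the burst handling above, under the assumption that each allocation attempt increments the gate and is only honoured while the result stays at or below 1: with kfence_burst = 2 the gate starts at -2, the increments to -1, 0 and 1 all pass, and the next attempt (reaching 2) falls back to the normal allocator, giving 1 + kfence_burst guarded allocations per sample interval.
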
870 * re-allocate the memory pool. in kfence_alloc_pool_and_metadata()
905 pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, in kfence_init_enable()
932 int err = -ENOMEM; in kfence_init_late()
940 return -ENOMEM; in kfence_init_late()
951 return -EINVAL; in kfence_init_late()
956 return -ENOMEM; in kfence_init_late()
973 free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); in kfence_init_late()
974 err = -EBUSY; in kfence_init_late()
1000 pr_info("re-enabled\n"); in kfence_enable_late()
1026 if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta)) in kfence_shutdown_cache()
1029 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_shutdown_cache()
1030 in_use = meta->cache == s && kfence_obj_allocated(meta); in kfence_shutdown_cache()
1031 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_shutdown_cache()
1048 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true); in kfence_shutdown_cache()
1056 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED) in kfence_shutdown_cache()
1059 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_shutdown_cache()
1060 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED) in kfence_shutdown_cache()
1061 meta->cache = NULL; in kfence_shutdown_cache()
1062 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_shutdown_cache()
1083 * Skip allocations from non-default zones, including DMA. We cannot in __kfence_alloc()
1089 (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { in __kfence_alloc()
1098 if (s->flags & SLAB_SKIP_KFENCE) in __kfence_alloc()
1124 * Do expensive check for coverage of allocation in slow-path after in __kfence_alloc()
1125 * allocation_gate has already become non-zero, even though it might in __kfence_alloc()
1129 * full, including avoiding long-lived allocations of the same source in __kfence_alloc()
1147 * Read locklessly -- if there is a race with __kfence_alloc(), this is in kfence_ksize()
1148 * either a use-after-free or invalid access. in kfence_ksize()
1150 return meta ? meta->size : 0; in kfence_ksize()
1158 * Read locklessly -- if there is a race with __kfence_alloc(), this is in kfence_object_start()
1159 * either a use-after-free or invalid access. in kfence_object_start()
1161 return meta ? (void *)meta->addr : NULL; in kfence_object_start()
1169 KFENCE_WARN_ON(meta->obj_exts.objcg); in __kfence_free()
1173 * the object, as the object page may be recycled for other-typed in __kfence_free()
1174 * objects once it has been freed. meta->cache may be NULL if the cache in __kfence_free()
1179 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) { in __kfence_free()
1182 raw_spin_lock_irqsave(&meta->lock, flags); in __kfence_free()
1184 raw_spin_unlock_irqrestore(&meta->lock, flags); in __kfence_free()
1185 call_rcu(&meta->rcu_head, rcu_guarded_free); in __kfence_free()
1193 const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; in kfence_handle_page_fault()
1211 meta = addr_to_metadata(addr - PAGE_SIZE); in kfence_handle_page_fault()
1215 distance = addr - data_race(meta->addr + meta->size); in kfence_handle_page_fault()
1221 if (!to_report || distance > data_race(meta->addr) - addr) in kfence_handle_page_fault()
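
Worked example of the neighbour selection above (addresses assumed for illustration, PAGE_SIZE = 4096): for a fault 8 bytes into a guard page, with the left object ending 16 bytes before that guard page and the right object starting 64 bytes into the following page, the distances are 8 + 16 = 24 versus 4096 + 64 - 8 = 4152, so the access is attributed to the left object and reported as out-of-bounds to its right.
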
1228 raw_spin_lock_irqsave(&to_report->lock, flags); in kfence_handle_page_fault()
1229 to_report->unprotected_page = addr; in kfence_handle_page_fault()
1234 * report this as an OOB -- the report will simply show the in kfence_handle_page_fault()
1242 raw_spin_lock_irqsave(&to_report->lock, flags); in kfence_handle_page_fault()
1247 * use-after-free, with the stack trace showing the place where in kfence_handle_page_fault()
1248 * the object was re-allocated. in kfence_handle_page_fault()
1255 raw_spin_unlock_irqrestore(&to_report->lock, flags); in kfence_handle_page_fault()