Lines matching "page" and "based" in include/linux/kasan.h

1 /* SPDX-License-Identifier: GPL-2.0 */
6 #include <linux/kasan-enabled.h>
7 #include <linux/kasan-tags.h>
13 struct page;
32 #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
112 void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
113 static __always_inline void kasan_poison_pages(struct page *page,
117 __kasan_poison_pages(page, order, init);
120 bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
121 static __always_inline bool kasan_unpoison_pages(struct page *page,
125 return __kasan_unpoison_pages(page, order, init);
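
/*
 * Example (not from the header): a minimal sketch of how an allocator
 * might drive the wrappers above: unpoison pages on allocation, poison
 * them again on free. The helper names and the reading of the boolean
 * return (false meaning KASAN skipped the pages, so the caller must
 * initialize them itself) are assumptions for illustration.
 */
#include <linux/kasan.h>
#include <linux/highmem.h>

static void example_prep_pages(struct page *page, unsigned int order,
			       bool want_init)
{
	if (!kasan_unpoison_pages(page, order, want_init) && want_init) {
		int i;

		/* KASAN did not handle the pages; zero them by hand. */
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);
	}
}

static void example_release_pages(struct page *page, unsigned int order)
{
	/* Mark the pages inaccessible again before they are recycled. */
	kasan_poison_pages(page, order, false);
}
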
138 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
156 * kasan_poison_new_object - Repoison a new slab object.
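
/*
 * Example (not from the header): a minimal sketch of the pairing the two
 * kernel-doc lines above describe. The (cache, object) parameter lists
 * are assumed from context, and example_ctor() stands in for whatever
 * early initialization the slab allocator needs to do on the object.
 */
#include <linux/kasan.h>
#include <linux/slab.h>

static void example_construct(struct kmem_cache *cache, void *object,
			      void (*example_ctor)(void *))
{
	/* Lift the poison just long enough to write the new object. */
	kasan_unpoison_new_object(cache, object);
	example_ctor(object);
	/* Repoison until the object is handed out by a real allocation. */
	kasan_poison_new_object(cache, object);
}
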
184 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
188 * check for double-free and invalid-free bugs and report them.
205 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
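
/*
 * Example (not from the header): a sketch of a slab free path built on
 * the two hooks above. The return-value conventions assumed here are
 * that kasan_slab_pre_free() returns true when the free is unsafe (a
 * double- or invalid-free was reported) and kasan_slab_free() returns
 * true when the object was quarantined; note that the exact
 * kasan_slab_free() parameter list varies across kernel versions.
 */
#include <linux/kasan.h>
#include <linux/slab.h>

static bool example_free_object(struct kmem_cache *s, void *object, bool init)
{
	if (kasan_slab_pre_free(s, object))
		return false;	/* bug reported; do not touch the object */

	if (kasan_slab_free(s, object, init))
		return false;	/* quarantined; KASAN frees it later */

	return true;		/* safe to return the object to the slab */
}
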
284 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
287 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
288 * @page: Pointer to the page allocation.
291 * This function is intended for kernel subsystems that cache page allocations
295 * page allocations.
302 static __always_inline bool kasan_mempool_poison_pages(struct page *page,
306 return __kasan_mempool_poison_pages(page, order, _RET_IP_);
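
/*
 * Example (not from the header): the caching side described above.
 * struct my_pool and pool_push_page() are hypothetical helpers; the
 * policy of refusing to cache pages that fail the check follows the
 * kernel-doc's description of the return value.
 */
#include <linux/kasan.h>

struct my_pool;	/* hypothetical pool type */
void pool_push_page(struct my_pool *pool, struct page *page,
		    unsigned int order);	/* hypothetical helper */

static void example_cache_page(struct my_pool *pool, struct page *page,
			       unsigned int order)
{
	/* Check and poison before stashing the spare allocation. */
	if (kasan_mempool_poison_pages(page, order))
		pool_push_page(pool, page, order);
	/* On false, KASAN reported a bug; do not cache or reuse the pages. */
}
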
310 void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
313 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
314 * @page: Pointer to the page allocation.
317 * This function is intended for kernel subsystems that cache page allocations
320 * This function unpoisons a page allocation that was previously poisoned by
322 * the tag-based modes, this function assigns a new tag to the allocation.
324 static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
328 __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
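
/*
 * Example (not from the header): the matching take-out side. Cached
 * pages are unpoisoned (and, in the tag-based modes, retagged) before
 * being handed back out. pool_pop_page() is the hypothetical
 * counterpart of pool_push_page() in the sketch above.
 */
struct my_pool;	/* hypothetical pool type */
struct page *pool_pop_page(struct my_pool *pool, unsigned int order);

static struct page *example_take_page(struct my_pool *pool, unsigned int order)
{
	struct page *page = pool_pop_page(pool, order);

	if (page)
		kasan_mempool_unpoison_pages(page, order);
	return page;
}
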
333 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
344 * This function also performs checks to detect double-free and invalid-free
366 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
376 * initializing the allocation's memory. For the tag-based modes, this function
378 * original tags based on the pointer value.
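
/*
 * Example (not from the header): the slab-object variant, mirroring the
 * page helpers above. kasan_mempool_unpoison_object() is assumed to take
 * the object's size, per the kernel-doc, so the pool must know how large
 * its cached elements are. All pool helpers here are hypothetical.
 */
#include <linux/kasan.h>

struct my_pool;	/* hypothetical pool type */
void pool_push_elem(struct my_pool *pool, void *elem);
void *pool_pop_elem(struct my_pool *pool);

static void example_cache_elem(struct my_pool *pool, void *elem)
{
	/* Poison and run the double-/invalid-free checks before caching. */
	if (kasan_mempool_poison_object(elem))
		pool_push_elem(pool, elem);
	/* On false, KASAN reported a bug; do not cache or reuse the object. */
}

static void *example_take_elem(struct my_pool *pool, size_t elem_size)
{
	void *elem = pool_pop_elem(pool);

	/* Unpoison (and retag, in the tag-based modes) before reuse. */
	if (elem)
		kasan_mempool_unpoison_object(elem, elem_size);
	return elem;
}
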
393 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
406 static inline void kasan_poison_pages(struct page *page, unsigned int order,
408 static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
454 static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
458 static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
497 /* Tag-based KASAN modes do not use per-object metadata. */
503 /* And no cache-related metadata initialization is required. */
522 * kasan_report - print a report about a bad memory access detected by KASAN
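
/*
 * Example (not from the header): a sketch of a caller, such as an
 * architecture's tag-check fault handler in the hardware tag-based mode.
 * The (addr, size, is_write, ip) signature is an assumption based on
 * mainline at the time of writing; the listing shows only the doc title.
 */
#include <linux/kasan.h>

static void example_handle_tag_fault(unsigned long addr, bool is_write,
				     unsigned long ip)
{
	/* Report the bad access; KASAN decides whether to panic. */
	kasan_report((void *)addr, 0, is_write, ip);
}
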