/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */
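
/*
 * Illustrative note (not from the kernel tree): these two bits form the
 * @flags argument of kasan_release_vmalloc() declared below, selecting
 * whether to clear the shadow for the page range, flush the TLB, or both:
 *
 *	kasan_release_vmalloc(start, end, free_region_start, free_region_end,
 *			      KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
 */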

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
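
/*
 * Example (illustrative, not from the kernel tree): under the generic mode,
 * KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte covers an 8-byte
 * granule. Reading the shadow byte directly shows the granule's state:
 *
 *	s8 shadow = *(s8 *)kasan_mem_to_shadow(ptr);
 *
 * A value of 0 means the whole granule is addressable, 1..7 means only that
 * many leading bytes are, and negative values encode poisoning (redzones,
 * freed memory, or KASAN_SHADOW_INIT for unused shadow).
 */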

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
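
/*
 * Illustrative sketch (not from the kernel tree): code that deliberately
 * touches memory KASAN would otherwise flag, e.g. a debugging dumper that
 * walks freed objects, can suppress reports for the current task; the
 * helper name below is hypothetical:
 *
 *	kasan_disable_current();
 *	dump_object_memory(ptr);
 *	kasan_enable_current();
 */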

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}
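
/*
 * Illustrative sketch (not from the kernel tree) of how a page allocator
 * pairs the two hooks above; apart from the kasan_*(),
 * want_init_on_alloc(), and want_init_on_free() calls, all names are
 * hypothetical:
 *
 *	struct page *my_alloc_pages(unsigned int order, gfp_t gfp)
 *	{
 *		struct page *page = take_from_freelist(order);
 *		bool init = want_init_on_alloc(gfp);
 *
 *		if (page && !kasan_unpoison_pages(page, order, init) && init)
 *			my_zero_pages(page, order);
 *		return page;
 *	}
 *
 *	void my_free_pages(struct page *page, unsigned int order)
 *	{
 *		kasan_poison_pages(page, order, want_init_on_free());
 *		put_on_freelist(page, order);
 *	}
 *
 * A true return from kasan_unpoison_pages() signals that KASAN already
 * initialized the memory (integrated init), so zeroing can be skipped.
 */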

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @s: Cache the object belongs to.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @s: Cache the object belongs to.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}
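
/*
 * Illustrative sketch (not from the kernel tree) of the free-path ordering
 * that kasan_slab_pre_free() and kasan_slab_free() imply; only the
 * kasan_*() and want_init_on_free() calls are real:
 *
 *	bool my_slab_free(struct kmem_cache *s, void *object)
 *	{
 *		if (kasan_slab_pre_free(s, object))
 *			return false;	(double-free or invalid-free: bail out)
 *		if (kasan_slab_free(s, object, want_init_on_free(), false))
 *			return false;	(KASAN took ownership: quarantined)
 *		return true;		(safe to put on the freelist)
 *	}
 */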

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
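
/*
 * Illustrative sketch (not from the kernel tree): a subsystem that caches
 * whole page allocations for reuse; everything except the kasan_*() calls
 * is hypothetical:
 *
 *	void my_cache_put_pages(struct page *page, unsigned int order)
 *	{
 *		if (!kasan_mempool_poison_pages(page, order))
 *			return;		(buggy free detected, do not recycle)
 *		add_to_cached_pages(page);
 *	}
 *
 *	struct page *my_cache_get_pages(unsigned int order)
 *	{
 *		struct page *page = take_from_cached_pages();
 *
 *		if (page)
 *			kasan_mempool_unpoison_pages(page, order);
 *		return page;
 *	}
 */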

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							  size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
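
/*
 * Illustrative sketch (not from the kernel tree), mirroring how mempool
 * keeps freed slab elements around for reuse; the pool type and helpers
 * are hypothetical:
 *
 *	void my_pool_free(struct my_pool *pool, void *element)
 *	{
 *		if (!kasan_mempool_poison_object(element))
 *			return;		(double-free or invalid-free detected)
 *		push_element(pool, element);
 *	}
 *
 *	void *my_pool_alloc(struct my_pool *pool)
 *	{
 *		void *element = pop_element(pool);
 *
 *		if (element)
 *			kasan_mempool_unpoison_object(element, pool->elem_size);
 *		return element;
 *	}
 */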

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
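
/*
 * Illustrative sketch (not from the kernel tree): probing an address before
 * a raw access; if the check fails, KASAN has already printed a report for
 * the address:
 *
 *	if (kasan_check_byte(ptr))
 *		val = *(volatile u8 *)ptr;
 */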

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
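
/*
 * Illustrative note (not from the kernel tree): with the tag-based modes,
 * two pointers to the same object may carry different tags in their top
 * byte, so compare untagged addresses; the handler below is hypothetical:
 *
 *	if (kasan_reset_tag(a) == kasan_reset_tag(b))
 *		handle_same_address(a, b);
 */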

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}
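
/*
 * Illustrative sketch (not from the kernel tree) of how a vmalloc-style
 * mapping pairs these hooks; only the kasan_*() calls and flag names are
 * real, and the exact flags a real caller passes depend on the mapping:
 *
 *	area = kasan_unpoison_vmalloc(area, size,
 *				      KASAN_VMALLOC_VM_ALLOC |
 *				      KASAN_VMALLOC_PROT_NORMAL);
 *	(the tag-based modes may return a pointer with a new tag, so the
 *	returned value must be used in place of the original)
 *
 *	kasan_poison_vmalloc(area, size);	(when freeing the mapping)
 */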

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* _LINUX_KASAN_H */