/* SPDX-License-Identifier: GPL-2.0 */
/*
 * powerpc KFENCE support.
 *
 * Copyright (C) 2020 CS GROUP France
 */

#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H

#include <linux/mm.h>
#include <asm/pgtable.h>

#ifdef CONFIG_PPC64_ELF_ABI_V1
#define ARCH_FUNC_PREFIX "."
#endif

extern bool kfence_early_init;
extern bool kfence_disabled;

disable_kfence(void)21 static inline void disable_kfence(void)
22 {
23 kfence_disabled = true;
24 }
arch_kfence_init_pool(void)26 static inline bool arch_kfence_init_pool(void)
27 {
28 return !kfence_disabled;
29 }
kfence_early_init_enabled(void)31 static inline bool kfence_early_init_enabled(void)
32 {
33 return IS_ENABLED(CONFIG_KFENCE) && kfence_early_init;
34 }
36 #ifdef CONFIG_PPC64
kfence_protect_page(unsigned long addr,bool protect)37 static inline bool kfence_protect_page(unsigned long addr, bool protect)
38 {
39 struct page *page = virt_to_page((void *)addr);
40
41 __kernel_map_pages(page, 1, !protect);
42
43 return true;
44 }
45 #else
kfence_protect_page(unsigned long addr,bool protect)46 static inline bool kfence_protect_page(unsigned long addr, bool protect)
47 {
48 pte_t *kpte = virt_to_kpte(addr);
49
50 if (protect) {
51 pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
52 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
53 } else {
54 pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
55 }
56
57 return true;
58 }
59 #endif

#endif /* __ASM_POWERPC_KFENCE_H */