// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

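/*
 * Fix up the access/dirty flags of a PTE after a page fault.  Returns
 * true when the PTE should be treated as updated so the generic fault
 * path runs update_mmu_cache(), false when the fault was spurious and
 * nothing changed.
 */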
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
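	/*
	 * Patched at boot via ALTERNATIVE(): the nop falls through to the
	 * legacy path, while harts implementing the Svvptc extension get a
	 * jump to the svvptc label below.
	 */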
	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
		 : : : : svvptc);

	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(vma->vm_mm, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;

svvptc:
	if (!pte_same(ptep_get(ptep), entry)) {
		__set_pte_at(vma->vm_mm, ptep, entry);
		/*
		 * Only harts without Svadu are impacted here: with Svadu,
		 * the hardware updates the A/D bits itself instead of
		 * taking this fault.
		 */
		flush_tlb_page(vma, address);
		return true;
	}

	return false;
}

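/*
 * Atomically test and clear the accessed (young) bit of a PTE; used by
 * reclaim to track which pages have been referenced.  Returns the
 * previous state of the bit.
 */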
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
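/*
 * RISC-V folds page-table levels at runtime: on Sv39 there is no real
 * PUD level, so when pgtable_l4_enabled is false the P4D entry itself
 * is reinterpreted as the PUD instead of walking another table.
 */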
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}

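/*
 * Likewise, the P4D level only exists under Sv57: when
 * pgtable_l5_enabled is false, the PGD entry doubles as the P4D.
 */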
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
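/*
 * Huge vmap mappings at the P4D level are not supported: returning 0
 * makes the generic vmap code fall back to smaller mapping sizes.
 */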
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

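/*
 * Install a PUD-sized leaf mapping for huge vmap/ioremap; returning 1
 * reports success to the generic code.
 */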
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

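/*
 * Clear a PUD entry only if it is a leaf: a non-leaf entry points to a
 * PMD table that must be torn down via pud_free_pmd_page() instead.
 */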
int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

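/*
 * Free the PMD table (and any PTE tables beneath it) hanging off a PUD
 * entry so the slot can be remapped as a huge page.  The PUD is cleared
 * and the TLB flushed before the tables are freed, so no concurrent
 * walker can still reach them.
 */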
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

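/*
 * PMD-level counterpart of pud_set_huge() above: install a PMD-sized
 * (2MiB with 4K base pages) leaf mapping.
 */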
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

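/*
 * Clear a PMD entry only when it is a leaf; a non-leaf entry carries a
 * PTE table that pmd_free_pte_page() must release instead.
 */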
int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

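/*
 * Free the PTE table behind a PMD entry, clearing the PMD and flushing
 * the TLB before the free so the table cannot still be walked.
 */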
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
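/*
 * Atomically clear a PMD entry during THP collapse and return its old
 * value; the comment below explains why this needs a full-mm flush
 * rather than a per-address sfence.vma.
 */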
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */