1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_GENERIC_PGALLOC_H
3 #define __ASM_GENERIC_PGALLOC_H
4 
5 #ifdef CONFIG_MMU
6 
7 #define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
8 #define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
9 
10 /**
11  * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
12  * @mm: the mm_struct of the current context
13  *
14  * This function is intended for architectures that need
15  * anything beyond simple page allocation.
16  *
17  * Return: pointer to the allocated memory or %NULL on error
18  */
__pte_alloc_one_kernel_noprof(struct mm_struct * mm)19 static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
20 {
21 	struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL &
22 			~__GFP_HIGHMEM, 0);
23 
24 	if (!ptdesc)
25 		return NULL;
26 	return ptdesc_address(ptdesc);
27 }
28 #define __pte_alloc_one_kernel(...)	alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
29 
30 #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
31 /**
32  * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
33  * @mm: the mm_struct of the current context
34  *
35  * Return: pointer to the allocated memory or %NULL on error
36  */
pte_alloc_one_kernel_noprof(struct mm_struct * mm)37 static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
38 {
39 	return __pte_alloc_one_kernel_noprof(mm);
40 }
41 #define pte_alloc_one_kernel(...)	alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
42 #endif
43 
44 /**
45  * pte_free_kernel - free PTE-level kernel page table memory
46  * @mm: the mm_struct of the current context
47  * @pte: pointer to the memory containing the page table
48  */
pte_free_kernel(struct mm_struct * mm,pte_t * pte)49 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
50 {
51 	pagetable_free(virt_to_ptdesc(pte));
52 }
53 
54 /**
55  * __pte_alloc_one - allocate memory for a PTE-level user page table
56  * @mm: the mm_struct of the current context
57  * @gfp: GFP flags to use for the allocation
58  *
59  * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
60  *
61  * This function is intended for architectures that need
62  * anything beyond simple page allocation or must have custom GFP flags.
63  *
64  * Return: `struct page` referencing the ptdesc or %NULL on error
65  */
__pte_alloc_one_noprof(struct mm_struct * mm,gfp_t gfp)66 static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
67 {
68 	struct ptdesc *ptdesc;
69 
70 	ptdesc = pagetable_alloc_noprof(gfp, 0);
71 	if (!ptdesc)
72 		return NULL;
73 	if (!pagetable_pte_ctor(ptdesc)) {
74 		pagetable_free(ptdesc);
75 		return NULL;
76 	}
77 
78 	return ptdesc_page(ptdesc);
79 }
80 #define __pte_alloc_one(...)	alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
81 
82 #ifndef __HAVE_ARCH_PTE_ALLOC_ONE
83 /**
84  * pte_alloc_one - allocate a page for PTE-level user page table
85  * @mm: the mm_struct of the current context
86  *
87  * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
88  *
89  * Return: `struct page` referencing the ptdesc or %NULL on error
90  */
pte_alloc_one_noprof(struct mm_struct * mm)91 static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
92 {
93 	return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
94 }
95 #define pte_alloc_one(...)	alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
96 #endif
97 
/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table memory
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` referencing the ptdesc
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	struct ptdesc *ptdesc = page_ptdesc(pte_page);

	pagetable_dtor_free(ptdesc);
}


#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate memory for a PMD-level page table
 * @mm: the mm_struct of the current context
 * @addr: address the table will map (unused by the generic implementation)
 *
 * Allocate memory for a page table and ptdesc and runs pagetable_pmd_ctor().
 *
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;
	/* Constructor failure must release the freshly allocated table. */
	if (!pagetable_pmd_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	return ptdesc_address(ptdesc);
}
#define pmd_alloc_one(...)	alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
#endif

#ifndef __HAVE_ARCH_PMD_FREE
/**
 * pmd_free - free PMD-level page table memory
 * @mm: the mm_struct of the current context
 * @pmd: pointer to the memory containing the page table
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	/* A PMD table must be page-aligned; anything else is corruption. */
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

/**
 * __pud_alloc_one - allocate memory for a PUD-level page table
 * @mm: the mm_struct of the current context
 * @addr: address the table will map (unused by the generic implementation)
 *
 * Uses %GFP_PGTABLE_USER in user context and %GFP_PGTABLE_KERNEL in
 * kernel context; __GFP_HIGHMEM is always stripped since the table is
 * accessed by virtual address.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	gfp &= ~__GFP_HIGHMEM;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;

	pagetable_pud_ctor(ptdesc);
	return ptdesc_address(ptdesc);
}
#define __pud_alloc_one(...)	alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate memory for a PUD-level page table
 * @mm: the mm_struct of the current context
 * @addr: address the table will map (unused by the generic implementation)
 *
 * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
 * and %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __pud_alloc_one_noprof(mm, addr);
}
#define pud_alloc_one(...)	alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
#endif

/**
 * __pud_free - free PUD-level page table memory
 * @mm: the mm_struct of the current context
 * @pud: pointer to the memory containing the page table
 */
static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pud);

	/* A PUD table must be page-aligned; anything else is corruption. */
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_PUD_FREE
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	__pud_free(mm, pud);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

/**
 * __p4d_alloc_one - allocate memory for a P4D-level page table
 * @mm: the mm_struct of the current context
 * @addr: address the table will map (unused by the generic implementation)
 *
 * Uses %GFP_PGTABLE_USER in user context and %GFP_PGTABLE_KERNEL in
 * kernel context; __GFP_HIGHMEM is always stripped since the table is
 * accessed by virtual address.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	gfp &= ~__GFP_HIGHMEM;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;

	pagetable_p4d_ctor(ptdesc);
	return ptdesc_address(ptdesc);
}
#define __p4d_alloc_one(...)	alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__))

#ifndef __HAVE_ARCH_P4D_ALLOC_ONE
static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __p4d_alloc_one_noprof(mm, addr);
}
#define p4d_alloc_one(...)	alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
#endif

/**
 * __p4d_free - free P4D-level page table memory
 * @mm: the mm_struct of the current context
 * @p4d: pointer to the memory containing the page table
 */
static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(p4d);

	/* A P4D table must be page-aligned; anything else is corruption. */
	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_P4D_FREE
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	/* Nothing to free when the P4D level is folded into the PGD. */
	if (!mm_p4d_folded(mm))
		__p4d_free(mm, p4d);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

__pgd_alloc_noprof(struct mm_struct * mm,unsigned int order)261 static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order)
262 {
263 	gfp_t gfp = GFP_PGTABLE_USER;
264 	struct ptdesc *ptdesc;
265 
266 	if (mm == &init_mm)
267 		gfp = GFP_PGTABLE_KERNEL;
268 	gfp &= ~__GFP_HIGHMEM;
269 
270 	ptdesc = pagetable_alloc_noprof(gfp, order);
271 	if (!ptdesc)
272 		return NULL;
273 
274 	pagetable_pgd_ctor(ptdesc);
275 	return ptdesc_address(ptdesc);
276 }
277 #define __pgd_alloc(...)	alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__))
278 
__pgd_free(struct mm_struct * mm,pgd_t * pgd)279 static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
280 {
281 	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
282 
283 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
284 	pagetable_dtor_free(ptdesc);
285 }
286 
287 #ifndef __HAVE_ARCH_PGD_FREE
pgd_free(struct mm_struct * mm,pgd_t * pgd)288 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
289 {
290 	__pgd_free(mm, pgd);
291 }
292 #endif
293 
294 #endif /* CONFIG_MMU */
295 
296 #endif /* __ASM_GENERIC_PGALLOC_H */
297