// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <[email protected]>
 */
#include <linux/hugetlb.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/page.h>
#include <asm/asm.h>
#include <asm/set_memory.h>

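/*
 * Set storage keys via the SSKE instruction with the multiple-block
 * control set: one execution keys multiple 4K blocks and returns the
 * next address to be processed, so the caller simply loops until the
 * 1MB frame boundary is reached.
 */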
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}

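/*
 * Initialize the storage keys of the physical memory range
 * [start, end) to PAGE_DEFAULT_KEY. With EDAT1 whole 1MB frames are
 * handled by sske_frame(); unaligned head and tail pages are keyed
 * individually.
 */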
void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
		start += PAGE_SIZE;
	}
}

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);

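/*
 * Show the split of the kernel direct mapping in /proc/meminfo.
 * The shifts convert mapping counts to kB: 4K entries by << 2,
 * 1M entries by << 10 and 2G entries by << 21.
 */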
void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */

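/*
 * Exchange a page table entry and flush the associated TLB entries,
 * using the best instruction available: CRDTE (EDAT2) replaces the
 * entry relative to its containing table, CSPG exchanges the full
 * 8-byte entry, and the CSP fallback can only exchange the lower
 * 4 bytes, hence the offset into the entry.
 */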
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long *table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long *)((unsigned long)old & mask);
		crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}

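/*
 * Apply the requested attribute changes to each page table entry in
 * [addr, end). A plain SET_MEMORY_4K request is a no-op at this level
 * because the range is already mapped with 4K pages.
 */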
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	pte_t *ptep, new;

	if (flags == SET_MEMORY_4K)
		return 0;
	ptep = pte_offset_kernel(pmdp, addr);
	do {
		new = *ptep;
		if (pte_none(new))
			return -EINVAL;
		if (flags & SET_MEMORY_RO)
			new = pte_wrprotect(new);
		else if (flags & SET_MEMORY_RW)
			new = pte_mkwrite_novma(pte_mkdirty(new));
		if (flags & SET_MEMORY_NX)
			new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
		else if (flags & SET_MEMORY_X)
			new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
		if (flags & SET_MEMORY_INV) {
			new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
		} else if (flags & SET_MEMORY_DEF) {
			new = __pte(pte_val(new) & PAGE_MASK);
			new = set_pte_bit(new, PAGE_KERNEL);
		}
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}

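/*
 * Split a 1M segment mapping into 4K page mappings: allocate a page
 * table, propagate the segment's protect and noexec bits into every
 * new page table entry, and replace the large segment entry with a
 * pointer to the new table.
 */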
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro, nx;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	if (!nx)
		prot &= ~_PAGE_NOEXEC;
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(ptep, __pte(pte_addr | prot));
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	new = __pmd(__pa(pt_dir) | _SEGMENT_ENTRY);
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}

static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
			    unsigned long flags)
{
	pmd_t new = *pmdp;

	if (flags & SET_MEMORY_RO)
		new = pmd_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pmd_mkwrite_novma(pmd_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	else if (flags & SET_MEMORY_X)
		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	if (flags & SET_MEMORY_INV) {
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
	} else if (flags & SET_MEMORY_DEF) {
		new = __pmd(pmd_val(new) & PMD_MASK);
		new = set_pmd_bit(new, SEGMENT_KERNEL);
	}
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

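/*
 * Walk the segment table entries of [addr, end). A large mapping is
 * split when 4K granularity was explicitly requested or when the
 * range does not cover the whole segment; otherwise the change is
 * applied to the segment entry itself. The pud and p4d level walkers
 * below follow the same pattern.
 */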
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int need_split;
	pmd_t *pmdp;
	int rc = 0;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_leaf(*pmdp)) {
			need_split  = !!(flags & SET_MEMORY_4K);
			need_split |= !!(addr & ~PMD_MASK);
			need_split |= !!(addr + PMD_SIZE > next);
			if (need_split) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, flags);
		} else {
			rc = walk_pte_level(pmdp, addr, next, flags);
			if (rc)
				return rc;
		}
		pmdp++;
		addr = next;
		cond_resched();
	} while (addr < end);
	return rc;
}

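/*
 * Split a 2G region-third mapping into 1M segment mappings, analogous
 * to split_pmd_page() above.
 */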
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro, nx;

	pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	if (!nx)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pmd(pmdp, __pmd(pmd_addr | prot));
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	new = __pud(__pa(pm_dir) | _REGION3_ENTRY);
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}

static void modify_pud_page(pud_t *pudp, unsigned long addr,
			    unsigned long flags)
{
	pud_t new = *pudp;

	if (flags & SET_MEMORY_RO)
		new = pud_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pud_mkwrite(pud_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
	else if (flags & SET_MEMORY_X)
		new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
	if (flags & SET_MEMORY_INV) {
		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
	} else if (flags & SET_MEMORY_DEF) {
		new = __pud(pud_val(new) & PUD_MASK);
		new = set_pud_bit(new, REGION3_KERNEL);
	}
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int need_split;
	pud_t *pudp;
	int rc = 0;

	pudp = pud_offset(p4d, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_leaf(*pudp)) {
			need_split  = !!(flags & SET_MEMORY_4K);
			need_split |= !!(addr & ~PUD_MASK);
			need_split |= !!(addr + PUD_SIZE > next);
			if (need_split) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, flags);
		} else {
			rc = walk_pmd_level(pudp, addr, next, flags);
		}
		pudp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	p4d_t *p4dp;
	int rc = 0;

	p4dp = p4d_offset(pgd, addr);
	do {
		if (p4d_none(*p4dp))
			return -EINVAL;
		next = p4d_addr_end(addr, end);
		rc = walk_pud_level(p4dp, addr, next, flags);
		p4dp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

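/* serializes change_page_attr() operations */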
DEFINE_MUTEX(cpa_mutex);

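/*
 * Walk the kernel page tables for [addr, end) and apply the requested
 * attribute changes, splitting large mappings where necessary.
 * Returns -EINVAL if part of the range is not mapped at all.
 */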
static int change_page_attr(unsigned long addr, unsigned long end,
			    unsigned long flags)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_p4d_level(pgdp, addr, next, flags);
		if (rc)
			break;
		cond_resched();
	} while (pgdp++, addr = next, addr < end && !rc);
	return rc;
}

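/*
 * For addresses inside a VM_ALLOC (vmalloc) area, apply the change to
 * the direct-mapping alias of each backing page as well; see the
 * comment below for which flags are transferred.
 */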
static int change_page_attr_alias(unsigned long addr, unsigned long end,
				  unsigned long flags)
{
	unsigned long alias, offset, va_start, va_end;
	struct vm_struct *area;
	int rc = 0;

	/*
	 * Changes to read-only permissions on kernel VA mappings are also
	 * applied to the kernel direct mapping. Execute permissions are
	 * intentionally not transferred to keep all allocated pages within
	 * the direct mapping non-executable.
	 */
	flags &= SET_MEMORY_RO | SET_MEMORY_RW;
	if (!flags)
		return 0;
	area = NULL;
	while (addr < end) {
		if (!area)
			area = find_vm_area((void *)addr);
		if (!area || !(area->flags & VM_ALLOC))
			return 0;
		va_start = (unsigned long)area->addr;
		va_end = va_start + area->nr_pages * PAGE_SIZE;
		offset = (addr - va_start) >> PAGE_SHIFT;
		alias = (unsigned long)page_address(area->pages[offset]);
		rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
		if (rc)
			break;
		addr += PAGE_SIZE;
		if (addr >= va_end)
			area = NULL;
	}
	return rc;
}

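/*
 * Common entry point for all attribute changes. NX/X requests are
 * dropped when the machine lacks the instruction-execution-protection
 * facility. Callers typically use the set_memory_*() helpers declared
 * in asm/set_memory.h rather than calling this directly.
 */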
int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags)
{
	unsigned long end;
	int rc;

	if (!MACHINE_HAS_NX)
		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
	if (!flags)
		return 0;
	if (!numpages)
		return 0;
	addr &= PAGE_MASK;
	end = addr + numpages * PAGE_SIZE;
	mutex_lock(&cpa_mutex);
	rc = change_page_attr(addr, end, flags);
	if (rc)
		goto out;
	rc = change_page_attr_alias(addr, end, flags);
out:
	mutex_unlock(&cpa_mutex);
	return rc;
}

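/*
 * Implementation of the generic set_direct_map_*() interface:
 * invalidate the direct mapping of the given page(s) or restore the
 * default kernel mapping.
 */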
int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
}

int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
{
	unsigned long flags;

	if (valid)
		flags = SET_MEMORY_DEF;
	else
		flags = SET_MEMORY_INV;

	return __set_memory((unsigned long)page_to_virt(page), nr, flags);
}

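/*
 * Test whether the page's kernel address is currently translatable:
 * LOAD REAL ADDRESS sets condition code 0 only if a valid translation
 * exists.
 */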
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	unsigned int cc;

	addr = (unsigned long)page_address(page);
	asm volatile(
		"	lra	%[addr],0(%[addr])\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc), [addr] "+a" (addr)
		:
		: CC_CLOBBER);
	return CC_TRANSFORM(cc) == 0;
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)

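/*
 * Invalidate a range of page table entries and flush their TLB
 * entries. With the IPTE-range facility (facility bit 13) a single
 * instruction invalidates the whole range; otherwise each entry is
 * invalidated individually.
 */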
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}

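/*
 * DEBUG_PAGEALLOC/KFENCE: map or unmap a range of 4K pages in the
 * kernel direct mapping. Mapping just clears _PAGE_INVALID again,
 * unmapping invalidates the entries via ipte_range(). The nr
 * calculation limits each iteration to the entries remaining in the
 * current page table.
 */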
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	pte_t *ptep, pte;
	int nr, i, j;

	for (i = 0; i < numpages;) {
		address = (unsigned long)page_to_virt(page + i);
		ptep = virt_to_kpte(address);
		nr = (unsigned long)ptep >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID));
				set_pte(ptep, pte);
				address += PAGE_SIZE;
				ptep++;
			}
		} else {
			ipte_range(ptep, address, nr);
		}
		i += nr;
	}
}

#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */