// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>

#include "internal.h"

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memmap_alloc(size, align, goal, node, false);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Callers must request the same size for every allocation during the early stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

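/*
 * Device-altmap accounting helpers: the next pfn available for handing out
 * from the altmap, and how many pfns remain once the pages already allocated
 * (plus alignment padding) are subtracted from the free pool.
 */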
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

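/*
 * Carve @size bytes of memmap backing out of the device altmap instead of
 * system RAM. The request must be a multiple of PAGE_SIZE, and the start pfn
 * is rounded up to the largest power of two that divides the number of pfns
 * requested; the skipped pfns are charged to ->align. Purely as an
 * illustration: a PMD_SIZE request with 4K pages is 512 pfns, so the
 * allocation starts on a 512-pfn boundary.
 */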
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

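/*
 * Warn (once) if the page backing this part of the memmap was not allocated
 * close to the node it describes, i.e. the struct pages may end up off-node.
 */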
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(ptep_get(pte));
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

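/*
 * Ensure a PTE maps the vmemmap page at @addr: allocate a fresh backing page
 * (from @altmap or the regular allocators) unless @reuse is given, in which
 * case the already-populated @reuse page is mapped again with an extra
 * reference taken.
 */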
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(ptep_get(pte))) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

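/*
 * The helpers below populate one level of the kernel page tables backing the
 * vmemmap. If the entry is empty, a zeroed page is allocated for the next
 * level and hooked up; each helper returns the entry for the next level down,
 * or NULL if that allocation failed.
 */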
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		kernel_pte_init(p);
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_init(p);
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_init(p);
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

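/*
 * Populate the page-table chain (pgd..pte) for a single vmemmap page at
 * @addr, then check that the backing page is node-local. Returns the pte,
 * or NULL on allocation failure.
 */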
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

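/*
 * Map the vmemmap range [@start, @end) one base page at a time, optionally
 * reusing a single already-populated page for every PTE (the compound-page
 * deduplication case). vmemmap_populate_basepages() is the no-reuse wrapper
 * architectures use to map the memmap with base pages.
 */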
static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}

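/*
 * Default no-op hooks. Architectures that use vmemmap_populate_hugepages()
 * override these to install a PMD-level (huge page) vmemmap mapping and to
 * recognise an already-populated PMD, respectively.
 */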
void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
				      unsigned long addr, unsigned long next)
{
}

int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				       unsigned long addr, unsigned long next)
{
	return 0;
}

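/*
 * Populate the vmemmap with PMD-level (huge page) mappings where possible:
 * empty PMD entries get a PMD_SIZE block from vmemmap_alloc_block_buf() and
 * are installed via vmemmap_set_pmd(). When that allocation fails without an
 * altmap, or an existing PMD is not recognised by vmemmap_check_pmd(), the
 * chunk is mapped with base pages instead.
 */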
int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(READ_ONCE(*pmd))) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				vmemmap_set_pmd(pmd, p, node, addr, next);
				continue;
			} else if (altmap) {
				/*
				 * No fallback: In any case we care about, the
				 * altmap should be reasonably sized and aligned
				 * such that vmemmap_alloc_block_buf() will always
				 * succeed. For consistency with the PTE case,
				 * return an error here as failure could indicate
				 * a configuration issue with the size of the altmap.
				 */
				return -ENOMEM;
			}
		} else if (vmemmap_check_pmd(pmd, node, addr, next))
			continue;
		if (vmemmap_populate_basepages(addr, next, node, altmap))
			return -ENOMEM;
	}
	return 0;
}

#ifndef vmemmap_populate_compound_pages
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range and increments it after
 * each range is successfully onlined. Thus the value of @nr_range at
 * section memmap populate corresponds to the in-progress range being
 * onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

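/*
 * Build the deduplicated memmap for compound device pages: populate one
 * vmemmap page for the head struct pages and one for the first tail page,
 * then map that tail page repeatedly for the remaining tail struct pages.
 * Sections past the first instead reuse the tail page of the previous
 * section. See Documentation/mm/vmemmap_dedup.rst for the layout.
 */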
static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(ptep_get(pte)));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(ptep_get(pte)));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

#endif

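/*
 * Populate the memmap for a subsection-aligned pfn range and account the
 * vmemmap pages that were allocated for it. Returns the struct page for
 * @pfn, or NULL if the range is misaligned or the population failed.
 */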
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
			 !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_can_optimize(altmap, pgmap))
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	if (system_state == SYSTEM_BOOTING)
		memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
	else
		memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));

	return pfn_to_page(pfn);
}