Lines Matching +full:linear +full:- +full:mapping +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2020 FORTH-ICS/CARV
20 #include <linux/dma-map-ops.h>
104 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t, in print_mlk()
105 (((t) - (b)) >> LOG2_SZ_1K)); in print_mlk()
110 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t, in print_mlm()
111 (((t) - (b)) >> LOG2_SZ_1M)); in print_mlm()
116 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t, in print_mlg()
117 (((t) - (b)) >> LOG2_SZ_1G)); in print_mlg()
123 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t, in print_mlt()
124 (((t) - (b)) >> LOG2_SZ_1T)); in print_mlt()
132 unsigned long diff = t - b; in print_ml()
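The print_ml() fragment at line 132 feeds the four fixed-unit helpers above. A minimal sketch of that dispatch, assuming the LOG2_SZ_1K/1M/1G/1T shifts seen in lines 104-124 (the exact cut-over threshold here is an illustration, not the verbatim kernel code):

	static void print_ml(char *name, unsigned long b, unsigned long t)
	{
		unsigned long diff = t - b;

		/* Print with the largest unit that yields a non-zero count. */
		if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T))
			print_mlt(name, b, t);
		else if (diff >> LOG2_SZ_1G)
			print_mlg(name, b, t);
		else if (diff >> LOG2_SZ_1M)
			print_mlm(name, b, t);
		else
			print_mlk(name, b, t);
	}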
186 * non-coherent platforms. in mem_init()
237 * map the kernel in the linear mapping as read-only: we do not want in setup_bootmem()
241 vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK; in setup_bootmem()
245 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); in setup_bootmem()
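Line 241 is the usual align-up idiom: add PMD_SIZE - 1, then clear the low bits with PMD_MASK. A worked example, assuming the common 2 MiB PMD_SIZE (mask 0x1FFFFF):

	/* vmlinux_end = 0x81401000 */
	(0x81401000 + 0x1FFFFF) & ~0x1FFFFF == 0x81600000	/* next 2 MiB boundary */

so the memblock_reserve() at line 245 covers the kernel image rounded out to a whole PMD, keeping the read-only kernel mappable with huge PMD entries.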
249 * at worst, we map the linear mapping with PMD mappings. in setup_bootmem()
259 * In 64-bit, any use of __va/__pa before this point is wrong as we in setup_bootmem()
263 kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; in setup_bootmem()
266 * The size of the linear page mapping may restrict the amount of in setup_bootmem()
273 max_mapped_addr - phys_ram_base); in setup_bootmem()
274 pr_warn("Physical memory overflows the linear mapping size: region above %pa removed", in setup_bootmem()
281 * addresses greater than (void *)(-PAGE_SIZE) because: in setup_bootmem()
282 * - This memory would overlap with ERR_PTR in setup_bootmem()
283 * - This memory belongs to high memory, which is not supported in setup_bootmem()
285 * This is not applicable to a 64-bit kernel, because virtual addresses in setup_bootmem()
286 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are in setup_bootmem()
287 * occupied by the kernel mapping. Also, it is unrealistic for high memory in setup_bootmem()
288 * to exist on 64-bit platforms. in setup_bootmem()
291 max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE); in setup_bootmem()
292 memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr); in setup_bootmem()
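Annotated, the 32-bit reservation at lines 291-292 works out as follows (a sketch assuming 4 KiB pages; the unsigned negation is deliberate modulo arithmetic):

	/* (void *)(-PAGE_SIZE) == 0xFFFFF000: the last virtual page, which
	 * must stay free for ERR_PTR values. */
	max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
	/* (phys_addr_t)-max_mapped_addr == (top of the phys_addr_t range) -
	 * max_mapped_addr, i.e. everything above the last mappable byte. */
	memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);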
301 set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); in setup_bootmem()
323 hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); in setup_bootmem()
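The argument at line 323 is a page order: with the usual 4 KiB pages (PAGE_SHIFT = 12) and PUD_SHIFT = 30, PUD_SHIFT - PAGE_SHIFT = 18, so the CMA area is sized for 2^18-page (1 GiB) gigantic hugetlb pages.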
478 BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT); in alloc_pmd_early()
541 /* Only one PUD is available for early mapping */ in alloc_pud_early()
542 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_pud_early()
579 /* Only one P4D is available for early mapping */ in alloc_p4d_early()
580 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_p4d_early()
714 !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) in best_map_size()
718 !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) in best_map_size()
722 !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) in best_map_size()
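Lines 714-722 choose the largest mapping granule whose natural alignment holds for both addresses and whose span fits. Condensed into one function (a sketch: the pgtable_l5_enabled/pgtable_l4_enabled guards present in the real function are elided):

	static uintptr_t best_map_size(phys_addr_t pa, uintptr_t va, phys_addr_t size)
	{
		if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
			return P4D_SIZE;
		if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
			return PUD_SIZE;
		if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
			return PMD_SIZE;
		return PAGE_SIZE;	/* fall back to base pages */
	}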
737 size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata)); in __copy_data()
750 * In a 64-bit kernel, the kernel mapping is outside the linear mapping, so in pgprot_from_va()
751 * we must protect its linear mapping alias from being executed and in pgprot_from_va()
797 pr_info("Disabled 4-level and 5-level paging"); in print_no4lvl()
804 pr_info("Disabled 5-level paging"); in print_no5lvl()
811 mmap_rnd_bits_max = MMAP_VA_BITS - PAGE_SHIFT - 3; in set_mmap_rnd_bits_max()
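As a worked example for line 811: with Sv48 user VAs (MMAP_VA_BITS = 48) and 4 KiB pages (PAGE_SHIFT = 12), mmap_rnd_bits_max = 48 - 12 - 3 = 33 bits of mmap randomization.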
815 * There is a simple way to determine if 4-level is supported by the
816 * underlying hardware: establish 1:1 mapping in 4-level page table mode
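The probe the comment at lines 815-816 describes amounts to: point satp at an identity-mapped 4-level table, read it back, and fall back to 3-level paging if the MODE field did not stick (the ISA lets hardware leave satp unchanged when a mode is unsupported). A condensed sketch along those lines, not the verbatim kernel code:

	/* identity_satp: SATP_MODE_48 bits | PPN of an identity-mapped root;
	 * identity_satp/root_ppn are illustrative names. */
	unsigned long identity_satp = SATP_MODE_48 | root_ppn;
	unsigned long hw_satp;

	csr_write(CSR_SATP, identity_satp);
	hw_satp = csr_swap(CSR_SATP, 0ULL);	/* read back, then MMU off again */
	local_flush_tlb_all();

	if (hw_satp != identity_satp)
		disable_pgtable_l4();	/* Sv48 refused: stay on Sv39 (helper name assumed) */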
879 * setup_vm() is called from head.S with the MMU off.
883 * 1) It should use PC-relative addressing for accessing kernel symbols.
906 uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR; in relocate_kernel()
911 uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr; in relocate_kernel()
914 Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset); in relocate_kernel()
915 Elf64_Addr relocated_addr = rela->r_addend; in relocate_kernel()
917 if (rela->r_info != R_RISCV_RELATIVE) in relocate_kernel()
924 * mm->context.vdso in VDSO_OFFSET macro. in relocate_kernel()
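The relocate_kernel() fragments at lines 906-917 walk the .rela.dyn table: r_offset (a link-time VA) is converted to the physical address being patched, and R_RISCV_RELATIVE entries get their addend shifted by the runtime offset. A condensed sketch under those definitions (rela/rela_end bounds assumed; the guard follows the comment at line 924):

	for (; rela < rela_end; rela++) {
		Elf64_Addr addr = rela->r_offset - va_kernel_link_pa_offset;
		Elf64_Addr relocated_addr = rela->r_addend;

		if (rela->r_info != R_RISCV_RELATIVE)
			continue;

		/* Constants below KERNEL_LINK_ADDR (e.g. the VDSO_OFFSET use in
		 * mm->context.vdso) must stay absolute. */
		if (relocated_addr >= KERNEL_LINK_ADDR)
			relocated_addr += reloc_offset;

		*(Elf64_Addr *)addr = relocated_addr;
	}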
944 kernel_map.xiprom + (va - kernel_map.virt_addr), in create_kernel_page_table()
948 start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start; in create_kernel_page_table()
952 kernel_map.phys_addr + (va - start_va), in create_kernel_page_table()
963 kernel_map.phys_addr + (va - kernel_map.virt_addr), in create_kernel_page_table()
971 * Set up a 4 MB mapping that encompasses the device tree: for a 64-bit kernel,
972 * this means 2 PMD entries, whereas for a 32-bit kernel this is only 1 PGDIR
979 uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); in create_fdt_early_page_table()
984 /* In 32-bit only, the fdt lies in its own PGD */ in create_fdt_early_page_table()
995 dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1)); in create_fdt_early_page_table()
998 * For a 64-bit kernel, __va can't be used since it would return a linear in create_fdt_early_page_table()
999 * mapping address, whereas dtb_early_va will be used before in create_fdt_early_page_table()
1000 * setup_vm_final installs the linear mapping. For a 32-bit kernel, as the in create_fdt_early_page_table()
1001 * kernel is mapped in the linear mapping, that makes no difference. in create_fdt_early_page_table()
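A worked example of the fixmap math at lines 979 and 995, assuming 2 MiB PMD_SIZE: for dtb_pa = 0x82201234, pa rounds down to 0x82200000; the fixmap maps 4 MB starting there (on 64-bit, two PMD entries, so a DTB straddling a 2 MiB boundary is still fully covered); and dtb_early_va = fix_fdt_va + (0x82201234 & 0x1FFFFF) = fix_fdt_va + 0x1234 points at the blob itself.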
1030 * map the allocated physical pages since the linear mapping does not exist yet.
1092 u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start); in setup_vm()
1102 nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE; in setup_vm()
1117 kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); in setup_vm()
1124 kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); in setup_vm()
1126 kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; in setup_vm()
1127 kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr in setup_vm()
1128 + (uintptr_t)&_sdata - (uintptr_t)&_start; in setup_vm()
1132 kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; in setup_vm()
1133 kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; in setup_vm()
1142 * In 64-bit, we defer the setup of va_pa_offset to setup_bootmem, in setup_vm()
1145 * for the linear mapping. This is only possible because the kernel in setup_vm()
1146 * mapping lies outside the linear mapping. in setup_vm()
1147 * In 32-bit however, as the kernel resides in the linear mapping, in setup_vm()
1148 * setup_vm_final cannot change the mapping established here, in setup_vm()
1154 0UL : PAGE_OFFSET - kernel_map.phys_addr; in setup_vm()
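With va_pa_offset in place (line 1154 for 32-bit; on 64-bit it is deferred to setup_bootmem(), line 263), linear-mapping translation is a single addition or subtraction. A minimal sketch of the idiom; linear_va/linear_pa are illustrative names, and the real __va/__pa macros carry extra XIP and debug plumbing:

	/* Linear mapping: VA = PA + va_pa_offset for RAM addresses. */
	#define linear_va(pa)	((void *)((unsigned long)(pa) + kernel_map.va_pa_offset))
	#define linear_pa(va)	((unsigned long)(va) - kernel_map.va_pa_offset)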
1167 BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K); in setup_vm()
1177 BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); in setup_vm()
1228 /* Set up early mapping for FDT early scan */ in setup_vm()
1232 * Boot-time fixmap can only handle PMD_SIZE mappings. Thus, boot-ioremap in setup_vm()
1273 best_map_size(pa, va, end - pa); in create_linear_mapping_range()
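Around line 1273, each chunk of the range gets the biggest granule best_map_size() allows. A hedged sketch of the loop shape (parameter names assumed from the surrounding matches):

	phys_addr_t pa;
	uintptr_t va, map_size;

	for (pa = start; pa < end; pa += map_size) {
		va = (uintptr_t)__va(pa);
		map_size = fixed_map_size ? fixed_map_size :
					    best_map_size(pa, va, end - pa);
		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
				   prot ? *prot : pgprot_from_va(va));
	}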
1288 phys_addr_t ktext_size = __init_data_begin - _start; in create_linear_mapping_page_table()
1290 phys_addr_t krodata_size = _data - __start_rodata; in create_linear_mapping_page_table()
1300 * before we setup the linear mapping so that we avoid using hugepages in create_linear_mapping_page_table()
1310 /* Map all memory banks in the linear mapping */ in create_linear_mapping_page_table()
1341 * In 32-bit, the device tree lies in a pgd entry, so it must be copied in setup_vm_final()
1353 /* Map the linear mapping */ in setup_vm_final()
1389 * reserve_crashkernel() - reserves memory for crash kernel
1421 /* Depends on the linear mapping being ready */ in paging_init()
1458 * can't use hugepage mappings for 2-level page table because in case of in vmemmap_populate()
1468 * Pre-allocates page-table pages for a specific area in the kernel
1469 * page-table. Only the level which needs to be synchronized between
1470 * all page-tables is allocated because the synchronization can be
1511 * process page-tables later. in preallocate_pgd_pages_range()
1513 panic("Failed to pre-allocate %s pages for %s area\n", lvl, area); in preallocate_pgd_pages_range()
1642 while (nr_pages--) in free_vmemmap_storage()
1798 mhp_range.end = __pa(PAGE_END - 1); in arch_get_mappable_range()
1806 create_linear_mapping_range(start, start + size, 0, &params->pgprot); in arch_add_memory()
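For memory hotplug at line 1806, ordering matters: the hot-added range is stitched into the linear mapping first, then handed to the core with __add_pages(). A hedged sketch of that flow (the failure-path unmap helper name is assumed):

	int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
	{
		int ret;

		create_linear_mapping_range(start, start + size, 0, &params->pgprot);
		ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, params);
		if (ret)
			remove_linear_mapping(start, size);	/* assumed undo helper */
		return ret;
	}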