Lines Matching +full:vm +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
35 #include <asm/mach/map.h>
46 * zero-initialized data and COW.
52 * The pmd table for the upper-most set of pages.
143 int i, selected = -1; in early_cachepolicy()
154 if (selected == -1) in early_cachepolicy()
240 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
467 pr_warn("Forcing write-allocate cache policy for SMP\n"); in build_mem_type_table()
478 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those in build_mem_type_table()
490 * "update-able on write" bit on ARM610). However, Xscale and in build_mem_type_table()
513 * Mark device regions on ARMv6+ as execute-never in build_mem_type_table()
528 * - shared device is SXCB=1100 in build_mem_type_table()
529 * - nonshared device is SXCB=0100 in build_mem_type_table()
530 * - write combine device mem is SXCB=0001 in build_mem_type_table()
539 * - shared device is TEXCB=00101 in build_mem_type_table()
540 * - nonshared device is TEXCB=01000 in build_mem_type_table()
541 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
550 * - shared device is TEXCB=00001 in build_mem_type_table()
551 * - nonshared device is TEXCB=01000 in build_mem_type_table()
552 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
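The SXCB/TEXCB figures above are simply the TEX[2:0], C and B attribute bits of a short-descriptor section entry. As a minimal sketch (assuming the PMD_SECT_* helpers from pgtable-2level-hwdef.h), the last set of encodings, for ARMv6/ARMv7 without TEX remapping, could be composed as below; the variable names are invented for illustration and are not the exact statements in build_mem_type_table():

    /* Illustrative only: TEXCB encodings -> section attribute bits */
    #include <asm/pgtable-2level-hwdef.h>

    pmdval_t shared_dev    = PMD_SECT_BUFFERABLE; /* TEXCB=00001: TEX=000 C=0 B=1 */
    pmdval_t nonshared_dev = PMD_SECT_TEX(2);     /* TEXCB=01000: TEX=010 C=0 B=0 */
    pmdval_t wc_dev        = PMD_SECT_TEX(1);     /* TEXCB=00100: TEX=001 C=0 B=0 */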
567 * Now deal with the memory-type mappings in build_mem_type_table()
570 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; in build_mem_type_table()
576 * r/o, kernel r/w to map the vectors page. in build_mem_type_table()
583 * in the Short-descriptor translation table format descriptors. in build_mem_type_table()
632 * Non-cacheable Normal - intended for memory areas that must in build_mem_type_table()
637 /* Non-cacheable Normal is XCB = 001 */ in build_mem_type_table()
641 /* For both ARMv6 and non-TEX-remapping ARMv7 */ in build_mem_type_table()
681 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
683 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
685 mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
689 mem_types[MT_ROM].prot_sect |= cp->pmd; in build_mem_type_table()
691 switch (cp->pmd) { in build_mem_type_table()
701 ecc_mask ? "ECC enabled, " : "", cp->policy); in build_mem_type_table()
705 if (t->prot_l1) in build_mem_type_table()
706 t->prot_l1 |= PMD_DOMAIN(t->domain); in build_mem_type_table()
707 if (t->prot_sect) in build_mem_type_table()
708 t->prot_sect |= PMD_DOMAIN(t->domain); in build_mem_type_table()
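The loop above attaches each memory type to its access-control domain; PMD_DOMAIN() just shifts the domain number into the domain field of the level-1 descriptor (definition as found in pgtable-2level-hwdef.h):

    /* Domain field of a short-descriptor level-1 entry: bits [8:5] */
    #define PMD_DOMAIN(x)	(_AT(pmdval_t, (x)) << 5)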
718 else if (file->f_flags & O_SYNC) in phys_mem_access_prot()
767 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc); in alloc_init_pte()
769 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), in alloc_init_pte()
789 * (See arch/arm/include/asm/pgtable-2level.h) in __map_init_section()
795 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0)); in __map_init_section()
812 * With LPAE, we must loop over to map in alloc_init_pmd()
818 * Try a section mapping - addr, next and phys must all be in alloc_init_pmd()
821 if (type->prot_sect && in alloc_init_pmd()
829 phys += next - addr; in alloc_init_pmd()
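The alignment requirement described above reduces to one mask test; a sketch of the decision (assuming the usual SECTION_MASK semantics): section-map when addr, next and phys all share 1MiB alignment, fall back to a PTE table otherwise.

    if (type->prot_sect && ((addr | next | phys) & ~SECTION_MASK) == 0)
            __map_init_section(pmd, addr, next, phys, type, ng); /* one section entry */
    else
            alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                           type, alloc, ng);                     /* page-level mapping */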
845 phys += next - addr; in alloc_init_pud()
860 phys += next - addr; in alloc_init_p4d()
874 addr = md->virtual; in create_36bit_mapping()
875 phys = __pfn_to_phys(md->pfn); in create_36bit_mapping()
876 length = PAGE_ALIGN(md->length); in create_36bit_mapping()
880 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
890 if (type->domain) { in create_36bit_mapping()
892 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
896 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { in create_36bit_mapping()
898 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
906 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); in create_36bit_mapping()
917 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER | in create_36bit_mapping()
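The `phys |= ...` line above folds PA[35:32] of the supersection into descriptor bits [23:20]; a worked example with an assumed pfn (PAGE_SHIFT == 12):

    /* Assumed example: md->pfn = 0x120000
     *   full 36-bit address              = 0x120000 << 12   = 0x1_2000_0000
     *   __pfn_to_phys(pfn), low 32 bits                      = 0x2000_0000
     *   (pfn >> (32 - PAGE_SHIFT)) & 0xF                     = 0x1   (PA[35:32])
     *   phys |= 0x1 << 20  ->  supersection bits [23:20]     = 0x1
     */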
936 type = &mem_types[md->type]; in __create_mapping()
940 * Catch 36-bit addresses in __create_mapping()
942 if (md->pfn >= 0x100000) { in __create_mapping()
948 addr = md->virtual & PAGE_MASK; in __create_mapping()
949 phys = __pfn_to_phys(md->pfn); in __create_mapping()
950 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in __create_mapping()
952 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { in __create_mapping()
953 pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n", in __create_mapping()
954 (long long)__pfn_to_phys(md->pfn), addr); in __create_mapping()
965 phys += next - addr; in __create_mapping()
979 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { in create_mapping()
981 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
985 if (md->type == MT_DEVICE && in create_mapping()
986 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && in create_mapping()
987 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { in create_mapping()
989 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
1002 p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); in create_mapping_late()
1005 pud = pud_alloc(mm, p4d, md->virtual); in create_mapping_late()
1019 struct vm_struct *vm; in iotable_init() local
1027 for (md = io_desc; nr; md++, nr--) { in iotable_init()
1030 vm = &svm->vm; in iotable_init()
1031 vm->addr = (void *)(md->virtual & PAGE_MASK); in iotable_init()
1032 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in iotable_init()
1033 vm->phys_addr = __pfn_to_phys(md->pfn); in iotable_init()
1034 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; in iotable_init()
1035 vm->flags |= VM_ARM_MTYPE(md->type); in iotable_init()
1036 vm->caller = iotable_init; in iotable_init()
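iotable_init() is normally invoked from a machine's ->map_io() hook with a static array of map_desc entries; a hypothetical example (the physical UART address and the virtual address below are invented for illustration):

    static struct map_desc board_io_desc[] __initdata = {
            {
                    .virtual = 0xf8000000UL,              /* assumed free device VA */
                    .pfn     = __phys_to_pfn(0x10000000), /* assumed UART phys base */
                    .length  = SZ_4K,
                    .type    = MT_DEVICE,
            },
    };

    static void __init board_map_io(void)
    {
            iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
    }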
1044 struct vm_struct *vm; in vm_reserve_area_early() local
1049 vm = &svm->vm; in vm_reserve_area_early()
1050 vm->addr = (void *)addr; in vm_reserve_area_early()
1051 vm->size = size; in vm_reserve_area_early()
1052 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; in vm_reserve_area_early()
1053 vm->caller = caller; in vm_reserve_area_early()
1061 * (see definition in include/asm/pgtable-2level.h). However a call to
1068 * Let's avoid the issue by inserting dummy vm entries covering the unused
1080 struct vm_struct *vm; in fill_pmd_gaps() local
1085 vm = &svm->vm; in fill_pmd_gaps()
1086 addr = (unsigned long)vm->addr; in fill_pmd_gaps()
1091 * Check if this vm starts on an odd section boundary. in fill_pmd_gaps()
1102 * Then check if this vm ends on an odd section boundary. in fill_pmd_gaps()
1106 addr += vm->size; in fill_pmd_gaps()
1113 /* no need to look at any vm entry until we hit the next PMD */ in fill_pmd_gaps()
1114 next = (addr + PMD_SIZE - 1) & PMD_MASK; in fill_pmd_gaps()
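With 2-level tables one PMD entry spans two 1MiB sections, so a static mapping that starts or ends on the odd section leaves the sibling section owned by the same PMD but not covered by any vm area. A sketch of the boundary test this implies, with pmd_empty_section_gap() as the in-file helper that reserves the hole (treat the exact shape as an assumption):

    /* Sketch: area starts mid-PMD -> reserve the unused lower section */
    if ((addr & ~PMD_MASK) == SECTION_SIZE)
            pmd_empty_section_gap(addr & PMD_MASK);

    /* Sketch: area ends mid-PMD -> reserve the unused upper section */
    addr += vm->size;
    if ((addr & ~PMD_MASK) == SECTION_SIZE)
            pmd_empty_section_gap(addr);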
1140 struct map_desc map; in debug_ll_io_init() local
1142 debug_ll_addr(&map.pfn, &map.virtual); in debug_ll_io_init()
1143 if (!map.pfn || !map.virtual) in debug_ll_io_init()
1145 map.pfn = __phys_to_pfn(map.pfn); in debug_ll_io_init()
1146 map.virtual &= PAGE_MASK; in debug_ll_io_init()
1147 map.length = PAGE_SIZE; in debug_ll_io_init()
1148 map.type = MT_DEVICE; in debug_ll_io_init()
1149 iotable_init(&map, 1); in debug_ll_io_init()
1158 * area - the default is 240MiB.
1171 vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET); in early_vmalloc()
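The cap above keeps at least 32MiB of lowmem (plus VMALLOC_OFFSET) mappable below the vmalloc area; a worked example with typical (assumed) constants for a 3G/1G split:

    /* Assumed constants:
     *   VMALLOC_END    = 0xff800000
     *   PAGE_OFFSET    = 0xc0000000
     *   VMALLOC_OFFSET = 0x00800000 (8MiB),  SZ_32M = 0x02000000
     *   vmalloc_max    = 0xff800000 - (0xc0000000 + 0x02000000 + 0x00800000)
     *                  = 0x3d000000 = 976MiB
     * so a command line like "vmalloc=2G" would be clamped to roughly 976MiB.
     */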
1193 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4. in adjust_lowmem_bounds()
1198 vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET - in adjust_lowmem_bounds()
1209 len = round_up(block_start, PMD_SIZE) - block_start; in adjust_lowmem_bounds()
1229 * Find the first non-pmd-aligned page, and point in adjust_lowmem_bounds()
1231 * limit down to be pmd-aligned, which happens at the in adjust_lowmem_bounds()
1235 * bank can be non-pmd-aligned. The only exception is in adjust_lowmem_bounds()
1236 * that the start of the bank 0 must be section- in adjust_lowmem_bounds()
1253 high_memory = __va(arm_lowmem_limit - 1) + 1; in adjust_lowmem_bounds()
1269 pr_notice("Ignoring RAM at %pa-%pa\n", in adjust_lowmem_bounds()
1273 memblock_remove(memblock_limit, end - memblock_limit); in adjust_lowmem_bounds()
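Before being handed to memblock the limit is also rounded down to a whole PMD, so early allocations only come from memory that will be fully section-mapped; a sketch of the tail of this function (names as above, ordering simplified):

    if (!memblock_limit)
            memblock_limit = arm_lowmem_limit;
    /* allocate early memory only from the last fully mapped PMD */
    memblock_limit = round_down(memblock_limit, PMD_SIZE);
    memblock_set_current_limit(memblock_limit);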
1298 * are using a thumb-compiled kernel, there will be 8MB more in prepare_page_table()
1309 /* The XIP kernel is mapped in the module area -- skip over it */ in prepare_page_table()
1310 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; in prepare_page_table()
1353 * precious DMA-able memory... in arm_mm_memblock_reserve()
1355 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); in arm_mm_memblock_reserve()
1368 struct map_desc map; in devicemaps_init() local
1386 /* create a read-only mapping of the device tree */ in devicemaps_init()
1387 map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); in devicemaps_init()
1388 map.virtual = FDT_FIXED_BASE; in devicemaps_init()
1389 map.length = FDT_FIXED_SIZE; in devicemaps_init()
1390 map.type = MT_MEMORY_RO; in devicemaps_init()
1391 create_mapping(&map); in devicemaps_init()
1395 * Map the cache flushing regions. in devicemaps_init()
1398 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); in devicemaps_init()
1399 map.virtual = FLUSH_BASE; in devicemaps_init()
1400 map.length = SZ_1M; in devicemaps_init()
1401 map.type = MT_CACHECLEAN; in devicemaps_init()
1402 create_mapping(&map); in devicemaps_init()
1405 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); in devicemaps_init()
1406 map.virtual = FLUSH_BASE_MINICACHE; in devicemaps_init()
1407 map.length = SZ_1M; in devicemaps_init()
1408 map.type = MT_MINICLEAN; in devicemaps_init()
1409 create_mapping(&map); in devicemaps_init()
1413 * Create a mapping for the machine vectors at the high-vectors in devicemaps_init()
1414 * location (0xffff0000). If we aren't using high-vectors, also in devicemaps_init()
1415 * create a mapping at the low-vectors virtual address. in devicemaps_init()
1417 map.pfn = __phys_to_pfn(virt_to_phys(vectors)); in devicemaps_init()
1418 map.virtual = 0xffff0000; in devicemaps_init()
1419 map.length = PAGE_SIZE; in devicemaps_init()
1421 map.type = MT_HIGH_VECTORS; in devicemaps_init()
1423 map.type = MT_LOW_VECTORS; in devicemaps_init()
1425 create_mapping(&map); in devicemaps_init()
1428 map.virtual = 0; in devicemaps_init()
1429 map.length = PAGE_SIZE * 2; in devicemaps_init()
1430 map.type = MT_LOW_VECTORS; in devicemaps_init()
1431 create_mapping(&map); in devicemaps_init()
1434 /* Now create a kernel read-only mapping */ in devicemaps_init()
1435 map.pfn += 1; in devicemaps_init()
1436 map.virtual = 0xffff0000 + PAGE_SIZE; in devicemaps_init()
1437 map.length = PAGE_SIZE; in devicemaps_init()
1438 map.type = MT_LOW_VECTORS; in devicemaps_init()
1439 create_mapping(&map); in devicemaps_init()
1442 * Ask the machine support to map in the statically mapped devices. in devicemaps_init()
1444 if (mdesc->map_io) in devicemaps_init()
1445 mdesc->map_io(); in devicemaps_init()
1456 * any write-allocated cache lines in the vector page are written in devicemaps_init()
1482 /* Map all the lowmem memory banks. */ in map_lowmem()
1484 struct map_desc map; in map_lowmem() local
1486 pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n", in map_lowmem()
1502 * the kernel memory from it and map each part separately. We in map_lowmem()
1505 * +--------+ +--------+ in map_lowmem()
1506 * +-- start --+ +--------+ | Kernel | | Kernel | in map_lowmem()
1508 * | | | case 1 | +--------+ | | +--------+ in map_lowmem()
1509 * | Memory | +--------+ | | | Kernel | in map_lowmem()
1510 * | range | +--------+ | | | case 6 | in map_lowmem()
1511 * | | | Kernel | +--------+ | | +--------+ in map_lowmem()
1513 * +-- end ----+ +--------+ | case 4 | | | in map_lowmem()
1514 * +--------+ +--------+ in map_lowmem()
1517 /* Case 5: kernel covers range, don't map anything, should be rare */ in map_lowmem()
1525 /* Map memory below the kernel */ in map_lowmem()
1526 map.pfn = __phys_to_pfn(start); in map_lowmem()
1527 map.virtual = __phys_to_virt(start); in map_lowmem()
1528 map.length = kernel_sec_start - start; in map_lowmem()
1529 map.type = MT_MEMORY_RW; in map_lowmem()
1530 create_mapping(&map); in map_lowmem()
1531 /* Map memory above the kernel */ in map_lowmem()
1532 map.pfn = __phys_to_pfn(kernel_sec_end); in map_lowmem()
1533 map.virtual = __phys_to_virt(kernel_sec_end); in map_lowmem()
1534 map.length = end - kernel_sec_end; in map_lowmem()
1535 map.type = MT_MEMORY_RW; in map_lowmem()
1536 create_mapping(&map); in map_lowmem()
1552 map.pfn = __phys_to_pfn(start); in map_lowmem()
1553 map.virtual = __phys_to_virt(start); in map_lowmem()
1554 map.length = end - start; in map_lowmem()
1555 map.type = MT_MEMORY_RW; in map_lowmem()
1556 create_mapping(&map); in map_lowmem()
1567 * +----------------+ kernel_x_start in map_kernel()
1570 * +----------------+ kernel_x_end / kernel_nx_start in map_kernel()
1571 * | Non-executable | in map_kernel()
1573 * +----------------+ kernel_nx_end in map_kernel()
1579 * non-executable part of the kernel memory is actually mapped as executable. in map_kernel()
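Because the split can only fall on a section boundary, the executable end is rounded up to SECTION_SIZE; a sketch of how the four boundaries in the diagram are typically derived (the symbol choice, in particular __init_end, is an assumption here):

    /* Sketch: the RWX/RW boundary is rounded up to a section, so data that
     * shares the last executable section also ends up mapped executable. */
    phys_addr_t kernel_x_start  = kernel_sec_start;
    phys_addr_t kernel_x_end    = round_up(__pa(__init_end), SECTION_SIZE);
    phys_addr_t kernel_nx_start = kernel_x_end;
    phys_addr_t kernel_nx_end   = kernel_sec_end;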
1591 struct map_desc map; in map_kernel() local
1594 * Map the kernel if it is XIP. in map_kernel()
1598 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); in map_kernel()
1599 map.virtual = MODULES_VADDR; in map_kernel()
1600 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; in map_kernel()
1601 map.type = MT_ROM; in map_kernel()
1602 create_mapping(&map); in map_kernel()
1604 map.pfn = __phys_to_pfn(kernel_x_start); in map_kernel()
1605 map.virtual = __phys_to_virt(kernel_x_start); in map_kernel()
1606 map.length = kernel_x_end - kernel_x_start; in map_kernel()
1607 map.type = MT_MEMORY_RWX; in map_kernel()
1608 create_mapping(&map); in map_kernel()
1614 map.pfn = __phys_to_pfn(kernel_nx_start); in map_kernel()
1615 map.virtual = __phys_to_virt(kernel_nx_start); in map_kernel()
1616 map.length = kernel_nx_end - kernel_nx_start; in map_kernel()
1617 map.type = MT_MEMORY_RW; in map_kernel()
1618 create_mapping(&map); in map_kernel()
1636 if (!mdesc->pv_fixup) in early_paging_init()
1639 offset = mdesc->pv_fixup(); in early_paging_init()
1663 /* Re-set the phys pfn offset, and the pv offset */ in early_paging_init()
1669 (&__pv_table_end - &__pv_table_begin) << 2); in early_paging_init()
1689 * Fixup the page tables - this must be in the idmap region as in early_paging_init()
1696 /* Re-enable the caches and cacheable TLB walks */ in early_paging_init()
1707 if (!mdesc->pv_fixup) in early_paging_init()
1710 offset = mdesc->pv_fixup(); in early_paging_init()
1725 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1); in early_fixmap_shutdown()
1733 struct map_desc map; in early_fixmap_shutdown() local
1735 map.virtual = fix_to_virt(i); in early_fixmap_shutdown()
1736 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual); in early_fixmap_shutdown()
1743 map.pfn = pte_pfn(*pte); in early_fixmap_shutdown()
1744 map.type = MT_DEVICE; in early_fixmap_shutdown()
1745 map.length = PAGE_SIZE; in early_fixmap_shutdown()
1747 create_mapping(&map); in early_fixmap_shutdown()
1764 pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n", in paging_init()
1812 if (--nr == 0) in set_ptes()
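The `--nr` test above is the exit condition of a loop that writes nr consecutive PTEs for a folio; a sketch of the assumed shape, advancing the pfn by one page per iteration:

    for (;;) {
            set_pte_ext(ptep, pteval, ext);  /* write one Linux/hardware PTE pair */
            if (--nr == 0)
                    break;
            ptep++;
            pte_val(pteval) += PAGE_SIZE;    /* next page frame */
    }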