1 /*
2 * Copyright (c) 2021 Google Inc. All rights reserved
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <arch/arm64/mmu.h>
25 #include <arch/ops.h>
26 #include <assert.h>
27 #include <kernel/vm.h>
28 #include <lk/compiler.h>
29 #include <panic.h>
30 #include <sys/types.h>
31
32 #if ARM64_BOOT_PROTOCOL_X0_DTB
33 #include <lib/device_tree/libfdt_helpers.h>
34 #endif
35
get_aspace_flags(void)36 static uint get_aspace_flags(void) {
37 uint aspace_flags = ARCH_ASPACE_FLAG_KERNEL;
38
39 #ifdef KERNEL_BTI_ENABLED
40 if (arch_bti_supported()) {
41 aspace_flags |= ARCH_ASPACE_FLAG_BTI;
42 }
43 #endif
44
45 return aspace_flags;
46 }
47
/*
 * Trampoline (identity-map) translation table used during early boot;
 * defined elsewhere (presumably in the assembly start code — confirm).
 */
extern pte_t tt_trampoline[MMU_PAGE_TABLE_ENTRIES_IDENT];

/*
 * The main kernel translation table. Aligned to its own size in bytes
 * (entry count * 8, i.e. 8 bytes per descriptor) — the arm64 translation
 * table base register requires the table to be naturally aligned.
 */
pte_t arm64_kernel_translation_table[MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP]
    __ALIGNED(MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP * 8);
54
/*
 * Early-boot physical-to-virtual translation: before the VM is up the
 * kernel accesses these structures through an identity mapping, so the
 * physical address is reinterpreted directly as a pointer.
 */
static void* early_mmu_paddr_to_kvaddr(paddr_t paddr) {
    void* vaddr = (void*)paddr;
    return vaddr;
}
58
/*
 * Allocate one page table of 2^page_size_shift bytes, naturally aligned,
 * from the boot allocator, returning its physical address in *paddrp.
 *
 * NOTE(review): the result is not NULL-checked; presumably
 * boot_alloc_memalign panics on exhaustion — confirm.
 */
static int alloc_page_table(paddr_t* paddrp, uint page_size_shift) {
    const size_t page_size = 1UL << page_size_shift;
    *paddrp = (paddr_t)boot_alloc_memalign(page_size, page_size);
    return 0;
}
65
/*
 * Page-table free hook for the early-boot MMU code. Nothing allocated
 * from the boot allocator can be released, so any call here means boot
 * cannot proceed — halt immediately.
 */
static void free_page_table(void* vaddr, paddr_t paddr, uint page_size_shift) {
    panic("reached free_page_table during early boot\n");
}
72
73 /*
74 * Override paddr_to_kvaddr since it's implemented in kernel/vm.c
75 * and we don't want to change that.
76 */
77 #define paddr_to_kvaddr early_mmu_paddr_to_kvaddr
78 #define EARLY_MMU
79 #include "mmu.inc"
80 #undef paddr_to_kvaddr
81
/*
 * Map [paddr, paddr + size) at vaddr in the kernel translation table
 * during early boot, before the VM subsystem is available.
 *
 * vaddr must lie in the kernel window at the top of the address space:
 * every bit above MMU_KERNEL_SIZE_SHIFT must be set, which the second
 * ASSERT enforces.
 *
 * Panics (via ASSERT) on unsupported flags, an out-of-range vaddr, or a
 * mapping failure — there is no error recovery this early in boot.
 */
void arch_mmu_map_early(vaddr_t vaddr,
                        paddr_t paddr,
                        size_t size,
                        uint flags) {
    pte_t attr;
    bool res = mmu_flags_to_pte_attr(get_aspace_flags(), flags, &attr);
    ASSERT(res);
    /* Mask of the fixed all-ones top bits of a kernel virtual address. */
    const uintptr_t vaddr_top_mask = ~0UL << MMU_KERNEL_SIZE_SHIFT;
    ASSERT((vaddr & vaddr_top_mask) == vaddr_top_mask);
    /*
     * vaddr ^ vaddr_top_mask clears the (verified all-ones) top bits,
     * presumably yielding the aspace-relative offset that
     * arm64_mmu_map_pt expects as its second argument — confirm against
     * mmu.inc.
     */
    int ret = arm64_mmu_map_pt(vaddr, vaddr ^ vaddr_top_mask, paddr, size, attr,
                               MMU_KERNEL_TOP_SHIFT, MMU_KERNEL_PAGE_SIZE_SHIFT,
                               arm64_kernel_translation_table,
                               MMU_ARM64_GLOBAL_ASID, false);
    ASSERT(!ret);
}
97
98 #if ARM64_BOOT_PROTOCOL_X0_MEMSIZE
/*
 * No-op under the X0_MEMSIZE boot protocol: presumably the trampoline
 * table already covers all RAM in this configuration, so nothing extra
 * needs mapping — TODO confirm against the start code.
 */
static inline void map_trampoline(paddr_t paddr, size_t size) {}
100
/*
 * Under the X0_MEMSIZE boot protocol the bootloader passes the RAM size
 * directly in x0, so it is returned unchanged; kernel_paddr is unused.
 */
ulong arm64_get_ram_size(ulong ram_size_or_dtb_addr, paddr_t kernel_paddr) {
    return ram_size_or_dtb_addr;
}
104 #elif ARM64_BOOT_PROTOCOL_X0_DTB
/*
 * Ensure the physical range [paddr, paddr + size) is identity-mapped in
 * the trampoline translation table: for each top-level entry the range
 * touches that is still empty, map one full top-level-sized region
 * (virt == phys), tagged when the CPU supports memory tagging.
 *
 * Fixes:
 *  - The mapped-region size was computed as `1 << MMU_IDENT_TOP_SHIFT`,
 *    shifting an int; for configurations where MMU_IDENT_TOP_SHIFT >= 31
 *    that is undefined behavior / truncation before widening to the
 *    size_t parameter. Use a size_t one instead.
 *  - Guard size == 0: `paddr + (size - 1)` would underflow, and with
 *    paddr == 0 the loop would walk the entire identity space.
 */
static void map_trampoline(paddr_t paddr, size_t size) {
    if (size == 0) {
        return; /* nothing to map; avoid end-address underflow below */
    }
    paddr_t end = paddr + (size - 1);
    paddr_t i = paddr >> MMU_IDENT_TOP_SHIFT;
    paddr_t end_i = end >> MMU_IDENT_TOP_SHIFT;
    pte_t attrs = arm64_tagging_supported() ? MMU_PTE_IDENT_FLAGS_TAGGED
                                            : MMU_PTE_IDENT_FLAGS;

    /*
     * Remove MMU_PTE_IDENT_DESCRIPTOR since arm64_mmu_map_pt will select this
     * on its own.
     */
    attrs &= ~MMU_PTE_IDENT_DESCRIPTOR;

    for (; i <= end_i; i++) {
        if (!tt_trampoline[i]) {
            int ret = arm64_mmu_map_pt(
                    i << MMU_IDENT_TOP_SHIFT, i << MMU_IDENT_TOP_SHIFT,
                    i << MMU_IDENT_TOP_SHIFT,
                    (size_t)1 << MMU_IDENT_TOP_SHIFT, attrs,
                    MMU_IDENT_TOP_SHIFT, MMU_IDENT_PAGE_SIZE_SHIFT,
                    tt_trampoline, MMU_ARM64_GLOBAL_ASID, false);
            ASSERT(!ret);
        }
    }
}
129
/*
 * Under the X0_DTB boot protocol, x0 holds the physical address of a
 * flattened device tree; derive the usable RAM size from it.
 *
 * The DTB is mapped in two steps: first just the fixed-size header
 * (FDT_V1_SIZE) so the magic and total size can be read, then the full
 * blob once fdt_totalsize() is known.
 *
 * Returns the amount of RAM from kernel_paddr to the end of the memory
 * node's first reg range (memory below the kernel is currently ignored,
 * see the TODO). Panics if there is no valid DTB, no memory node, or
 * the kernel is not inside the reported range.
 */
ulong arm64_get_ram_size(ulong ram_size_or_dtb_addr, paddr_t kernel_paddr) {
    const void *fdt = (const void *)ram_size_or_dtb_addr;
    int offset;
    paddr_t mem_base, mem_size;

    /* Make sure device-tree is mapped */
    map_trampoline(ram_size_or_dtb_addr, FDT_V1_SIZE);
    if (fdt_magic(fdt) != FDT_MAGIC) {
        panic("No device tree found at %p\n", fdt);
    }
    map_trampoline(ram_size_or_dtb_addr, fdt_totalsize(fdt));

    /* Find the node with device_type = "memory"; 7 == sizeof("memory"),
     * i.e. the property length including its NUL terminator. */
    offset = fdt_node_offset_by_prop_value(fdt, 0, "device_type", "memory", 7);
    if (fdt_helper_get_reg(fdt, offset, 0, &mem_base, &mem_size)) {
        panic("No memory node found in device tree\n");
    }

    if ((kernel_paddr >= mem_base) && (kernel_paddr - mem_base < mem_size)) {
        /*
         * TODO: Allow using memory below kernel base. For now subtract this
         * from mem_size and ignore this memory.
         */
        return mem_size - (kernel_paddr - mem_base);
    }

    panic("kernel_paddr, 0x%" PRIxPADDR ", not in memory range: 0x%" PRIxPADDR
          ", size 0x%" PRIxPADDR "\n",
          kernel_paddr, mem_base, mem_size);
}
159 #else
160 #error "Unknown ARM64_BOOT_PROTOCOL"
161 #endif
162
/*
 * Early MMU/VM initialization for a position-independent arm64 kernel.
 *
 * Sequence (order matters — each relocation pass must match where the
 * kernel is actually about to execute):
 *  1. Relocate the kernel image from its link-time virtual base to the
 *     physical address it was loaded at (relr_start/relr_end delimit the
 *     RELR relocation entries — presumably; confirm against the linker
 *     script and update_relocation_entries/relocate_kernel).
 *  2. Determine RAM size from x0 (direct size, or via the DTB).
 *  3. Identity-map all of RAM in the trampoline table.
 *  4. Hand RAM to the VM layer, pick an ASLR-randomized final kernel
 *     base, establish the initial mappings, and relocate the kernel a
 *     second time to that final virtual address.
 */
void arm64_early_mmu_init(ulong ram_size_or_dtb_addr, uintptr_t* relr_start,
                          uintptr_t* relr_end, paddr_t kernel_paddr) {
    const uintptr_t kernel_initial_vaddr = KERNEL_BASE + KERNEL_LOAD_OFFSET;
    uintptr_t virt_offset = kernel_initial_vaddr - kernel_paddr;
    update_relocation_entries(relr_start, relr_end, virt_offset);

    /* Relocate the kernel to its physical address */
    relocate_kernel(relr_start, relr_end, kernel_initial_vaddr, kernel_paddr);

    ulong ram_size = arm64_get_ram_size(ram_size_or_dtb_addr, kernel_paddr);

    /* Map any ram not already mapped in trampoline page table */
    map_trampoline(kernel_paddr, ram_size);

    vm_assign_initial_dynamic(kernel_paddr, ram_size);
    vaddr_t kernel_final_vaddr =
            aslr_randomize_kernel_base(kernel_initial_vaddr);
    vm_map_initial_mappings();

    /* Relocate the kernel to its final virtual address */
    relocate_kernel(relr_start, relr_end, kernel_paddr, kernel_final_vaddr);
}
185