/*
 * Copyright (c) 2014-2016 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <stdbool.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/ops.h>
#include <arch/arm64.h>
#include <arch/arm64/mmu.h>
#include <arch/mp.h>
#include <kernel/thread.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <trace.h>
#include <inttypes.h>

#define LOCAL_TRACE 0

#if WITH_SMP
/* smp boot lock, initialized held (1) so that secondary cpus spin in
 * arm64_secondary_entry() until the boot cpu releases it in arch_init() */
static spin_lock_t arm_boot_cpu_lock = 1;
static volatile int secondaries_to_init = 0;
#endif

extern void arm64_enter_uspace(ulong arg0, vaddr_t entry_point,
                               vaddr_t user_stack_top,
                               vaddr_t shadow_stack_base,
                               uint64_t spsr) __NO_RETURN;
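/*
 * arm64_enter_uspace is the low-level user-space entry routine (presumably
 * implemented in the arm64 assembly support code): it is expected to load
 * the user stack pointer, x18, SPSR_EL1 and ELR_EL1 from its arguments and
 * eret into EL0, so it never returns to the caller.
 */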

static void arm64_cpu_early_init(void)
{
    /* set the vector base */
    ARM64_WRITE_SYSREG(VBAR_EL1, (uint64_t)&arm64_exception_base);

    /* if not already in EL1, drop down to EL1
     * (CurrentEL bits [3:2] hold the current exception level) */
    unsigned int current_el = ARM64_READ_SYSREG(CURRENTEL) >> 2;
    if (current_el > 1) {
        arm64_el3_to_el1();
    }

    arch_enable_fiqs();
}

void arch_early_init(void)
{
    arm64_cpu_early_init();
    platform_init_mmu_mappings();
}

void arch_init(void)
{
#if WITH_SMP
    arch_mp_init_percpu();

    LTRACEF("midr_el1 0x%" PRIx64 "\n", ARM64_READ_SYSREG(midr_el1));

    secondaries_to_init = SMP_MAX_CPUS - 1; /* TODO: get count from somewhere else, or add cpus as they boot */

    lk_init_secondary_cpus(secondaries_to_init);

    LTRACEF("releasing %d secondary cpus\n", secondaries_to_init);

    /* release the secondary cpus */
    spin_unlock(&arm_boot_cpu_lock);

    /* flush the release of the lock, since the secondary cpus are running without cache on */
    arch_clean_cache_range((addr_t)&arm_boot_cpu_lock, sizeof(arm_boot_cpu_lock));
#endif
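    /* arch_tagging_enabled() reports whether the arm64 Memory Tagging
     * Extension (MTE) is usable; where exactly it is detected and enabled
     * is platform/boot-code dependent */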
    dprintf(SPEW, "mte is %savailable\n", arch_tagging_enabled() ? "" : "not ");
}

void arch_quiesce(void)
{
}

void arch_idle(void)
{
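    /* wait-for-interrupt: park the core in a low-power state until an
     * interrupt or other wakeup event arrives */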
    __asm__ volatile("wfi");
}

void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
{
    PANIC_UNIMPLEMENTED;
}

/*
 * switch to user mode, set the user stack pointer to user_stack_top, set
 * x18 to shadow_stack_base, put the svc stack pointer to the top of the
 * kernel stack.
 */
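/*
 * A hypothetical caller that has already mapped the user image and stack
 * might invoke this as, e.g.:
 *
 *   arch_enter_uspace(elf_entry, ustack_top, 0 /+ no shadow stack +/,
 *                     0 /+ 64-bit +/, (ulong)args_addr);
 *
 * (elf_entry, ustack_top and args_addr are illustrative names only;
 * the call does not return.)
 */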
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top, vaddr_t shadow_stack_base, uint32_t flags, ulong arg0)
{
    bool is_32bit_uspace = (flags & ARCH_ENTER_USPACE_FLAG_32BIT);
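    /* AAPCS64 requires a 16-byte aligned stack pointer; the 32-bit AAPCS
     * requires 8-byte alignment at public interfaces */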
    user_stack_top = round_down(user_stack_top, is_32bit_uspace ? 8 : 16);

    thread_t *ct = get_current_thread();

    vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
    kernel_stack_top = round_down(kernel_stack_top, 16);

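    /* shadow call stacks (when enabled, e.g. via -fsanitize=shadow-call-stack)
     * keep return addresses on a separate stack addressed through x18; if user
     * shadow stacks are compiled out, the base is expected to be 0 */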
#if !USER_SCS_SUPPORTED
    assert(shadow_stack_base == 0);
#endif

    /* set up a default spsr to get into user space:
     * 64-bit: zeroed NZCV, no SS, no IL, no D,
     *         all interrupts enabled, mode 0 (EL0t)
     * 32-bit: 0x10 (AArch32 User mode, all interrupts enabled)
     */
    uint64_t spsr = is_32bit_uspace ? 0x10 : 0;

    arch_disable_ints();

    arm64_enter_uspace(arg0, entry_point, user_stack_top, shadow_stack_base,
                       spsr);
    __UNREACHABLE;
}

#if WITH_SMP
void arm64_secondary_entry(ulong asm_cpu_num)
{
    uint cpu = arch_curr_cpu_num();
    if (cpu != asm_cpu_num)
        return;

    arm64_cpu_early_init();

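    /* block here until the boot cpu releases arm_boot_cpu_lock in arch_init();
     * taking and immediately dropping the lock is purely a release handshake */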
    spin_lock(&arm_boot_cpu_lock);
    spin_unlock(&arm_boot_cpu_lock);

    /* run early secondary cpu init routines up to the threading level */
    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);

    arch_mp_init_percpu();

    LTRACEF("cpu num %u\n", cpu);

    /* we're done, tell the main cpu we're up */
    atomic_add(&secondaries_to_init, -1);
    __asm__ volatile("sev");

    lk_secondary_cpu_entry();
}
#endif

void arch_set_user_tls(vaddr_t tls_ptr)
{
    /*
     * Note arm32 user space uses the read-only TLS register (tpidruro) and
     * arm64 user space uses the read-write one (tpidr_el0).
     * This matches existing ABIs.
     */
#ifdef USER_32BIT
    /* The lower bits of tpidrro_el0 are aliased with the arm32 tpidruro. */
    __asm__ volatile("msr tpidrro_el0, %0" :: "r" (tls_ptr));
#else
    /* Could also be set from user space; implemented here for uniformity. */
    __asm__ volatile("msr tpidr_el0, %0" :: "r" (tls_ptr));
#endif
}