1 /*
2  * Copyright (c) 2008 Travis Geiselbrecht
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <arch/arm64/sregs.h>
25 #include <sys/types.h>
26 #include <string.h>
27 #include <stdlib.h>
28 #include <debug.h>
29 #include <trace.h>
30 #include <kernel/thread.h>
31 #include <arch/arm64.h>
32 #include <platform/random.h>
33 
34 #define LOCAL_TRACE 0
35 
/*
 * Callee-saved register state pushed on a thread's kernel stack across a
 * context switch.  The layout and order must match the save/restore code
 * in the assembly helper arm64_context_switch (not visible in this file)
 * -- verify against that code before changing any field.
 */
struct context_switch_frame {
    vaddr_t lr;                 // x30: address the thread resumes at when switched in
    vaddr_t pad;                // Padding to keep frame size a multiple of
    vaddr_t tpidr_el0;          //  sp alignment requirements (16 bytes)
    vaddr_t tpidrro_el0;        // EL0 read-only thread pointer register
    vaddr_t r18;                // x18: shadow call stack pointer when KERNEL_SCS_ENABLED
    vaddr_t r19;                // x19..x29: AArch64 callee-saved registers
    vaddr_t r20;
    vaddr_t r21;
    vaddr_t r22;
    vaddr_t r23;
    vaddr_t r24;
    vaddr_t r25;
    vaddr_t r26;
    vaddr_t r27;
    vaddr_t r28;
    vaddr_t r29;                // x29: frame pointer
};
54 
55 extern void arm64_context_switch(addr_t *old_sp, addr_t new_sp);
56 
57 static void initial_thread_func(void) __NO_RETURN;
initial_thread_func(void)58 static void initial_thread_func(void)
59 {
60     int ret;
61 
62     thread_t *current_thread = get_current_thread();
63 
64     LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);
65 
66     /* release the thread lock that was implicitly held across the reschedule */
67     thread_unlock_ints_disabled();
68     arch_enable_ints();
69 
70     ret = current_thread->entry(current_thread->arg);
71 
72     LTRACEF("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);
73 
74     thread_exit(ret);
75 }
76 
/*
 * Set up the boot/idle thread structure for the given cpu and configure
 * pointer-authentication (PAC) controls in SCTLR_EL1.
 *
 * thread: boot thread structure to fill in (stack bookkeeping, PAC keys).
 * cpu:    cpu index; selects which slice of the linker-provided stack
 *         region this thread owns.
 *
 * Marked __ARCH_NO_PAC: the PAC keys are (re)programmed in here, so this
 * function itself must not rely on pointer authentication.
 */
__ARCH_NO_PAC void arch_init_thread_initialize(struct thread *thread, uint cpu)
{
    extern uint8_t __stack_end[];
    size_t stack_size = ARCH_DEFAULT_STACK_SIZE;
    /*
     * Per-cpu boot stacks are carved downward from __stack_end, one
     * ARCH_DEFAULT_STACK_SIZE slice per cpu.  NOTE(review): assumes the
     * linker script reserves stack_size * num_cpus below __stack_end --
     * confirm against the linker script.
     */
    uint8_t *cpu_stack_end = __stack_end - stack_size * cpu;
    thread->stack = cpu_stack_end - stack_size;
    thread->stack_high = cpu_stack_end;
    thread->stack_size = stack_size;
#if KERNEL_SCS_ENABLED
    extern uint8_t __shadow_stack[];
    /* shadow stack grows up unlike the regular stack */
    thread->shadow_stack = __shadow_stack + DEFAULT_SHADOW_STACK_SIZE * cpu;
    thread->shadow_stack_size = DEFAULT_SHADOW_STACK_SIZE;
#endif

    if (arch_pac_address_supported()) {
        uint64_t sctlr_el1 = ARM64_READ_SYSREG(SCTLR_EL1);

        /* start from a known state: all four pointer-auth keys disabled */
        sctlr_el1 &= ~(SCTLR_EL1_ENIA | SCTLR_EL1_ENIB | SCTLR_EL1_ENDA | SCTLR_EL1_ENDB);

#if KERNEL_PAC_ENABLED
        /* Generate and load the instruction A key */
        platform_random_get_bytes((void *)thread->arch.packeys.apia,
                                  sizeof(thread->arch.packeys.apia));

        /* key must be programmed before EnIA is set below */
        ARM64_WRITE_SYSREG_RAW(APIAKeyLo_EL1, thread->arch.packeys.apia[0]);
        ARM64_WRITE_SYSREG_RAW(APIAKeyHi_EL1, thread->arch.packeys.apia[1]);

        /*
         * Enable only the A key for use in EL1 and EL0.
         * PAuth instructions are NOPs for disabled keys.
         */
        sctlr_el1 |= SCTLR_EL1_ENIA;
#endif
        /* Ensure PACIxSP are valid BR jump targets in EL0 & EL1 */
        if (arch_bti_supported()) {
            sctlr_el1 &= ~(SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
        }

        ARM64_WRITE_SYSREG_RAW(SCTLR_EL1, sctlr_el1);
        ISB;    /* synchronize the SCTLR_EL1 update before returning */
    }
}
120 
/*
 * Build the initial context switch frame for a freshly created thread so
 * that the first switch to it "returns" into initial_thread_func.
 */
void arch_thread_initialize(thread_t *t)
{
    /*
     * Carve the frame out of the top of the thread's stack, keeping sp
     * 16-byte aligned as the AArch64 EABI requires.
     */
    vaddr_t sp = round_down((vaddr_t)t->stack + t->stack_size, 16);
    struct context_switch_frame *f = ((struct context_switch_frame *)sp) - 1;

    /* zero everything, then aim lr at the entry trampoline */
    memset(f, 0, sizeof(*f));
    f->lr = (vaddr_t)&initial_thread_func;

#if KERNEL_SCS_ENABLED
    /* x18 carries the shadow call stack pointer */
    f->r18 = (vaddr_t)t->shadow_stack;
#endif
#if KERNEL_PAC_ENABLED
    /* Allocate PAC keys */
    if (arch_pac_address_supported()) {
        platform_random_get_bytes((void *)t->arch.packeys.apia,
                                  sizeof(t->arch.packeys.apia));
    }
#endif

    /* the saved stack pointer is the frame itself */
    t->arch.sp = (vaddr_t)f;
}
151 
/*
 * Switch context from one thread to another.
 *
 * Installs newthread's PAC instruction-A key (when enabled), then hands
 * off to the assembly helper which saves oldthread's callee-saved state
 * and restores newthread's.  This function produces a non-PAC protected
 * stack frame (__ARCH_NO_PAC, __NO_INLINE) to enable switching while the
 * keys change underneath it.
 */
__ARCH_NO_PAC __NO_INLINE void arch_context_switch(thread_t *oldthread, thread_t *newthread)
{
    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
    arm64_fpu_pre_context_switch(oldthread);
#if WITH_SMP
    DSB; /* broadcast tlb operations in case the thread moves to another cpu */
#endif
#if KERNEL_PAC_ENABLED
    /* Set new PAC key if supported */
    if (arch_pac_address_supported()) {
        /* per-thread key: signatures made by oldthread's key stay on its stack */
        ARM64_WRITE_SYSREG_RAW(APIAKeyLo_EL1, newthread->arch.packeys.apia[0]);
        ARM64_WRITE_SYSREG_RAW(APIAKeyHi_EL1, newthread->arch.packeys.apia[1]);
        ISB; /* key writes must take effect before any authenticated return */
    }
#endif
    /*
     * Call the assembly helper.  As a tail-call, lr will point to this
     * function's caller, which due to __ARCH_NO_PAC and __NO_INLINE will not
     * have a PAC - if PAC is enabled.
     */
    arm64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
178 
/* Dump arch-specific state (saved sp) for a thread that is not running. */
void arch_dump_thread(thread_t *t)
{
    /* a running thread's arch.sp is stale; only report parked threads */
    if (t->state == THREAD_RUNNING)
        return;

    dprintf(INFO, "\tarch: ");
    dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
}
186