1/*
2 * Copyright (c) 2014 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23#include <asm.h>
24#include <arch/asm_macros.h>
25
/*
 * Exception vector table section. .align 12 places the table on a
 * 4096-byte boundary (2^12); presumably this section's start address is
 * what gets programmed into VBAR_EL1 — confirm against the linker script.
 */
.section .text.boot.vectab
.align 12

/* Alias so the iframe save/restore code below can use "lr" for x30. */
#define lr x30
/* Byte offset of the sp/spsr/fp/elr tail within each iframe flavor:
 * long frames save x0-x28 + lr (30 regs), short frames x0-x17 + lr (20 slots). */
#define regsave_long_offset (30 * 8)
#define regsave_short_offset (20 * 8)
32
/*
 * regsave - build an exception iframe on the EL0 stack.
 *
 * regsave_offset: regsave_long_offset or regsave_short_offset, selecting how
 *                 many GPRs are saved (x0-x28+lr vs x0-x17+lr).
 * reset_stack:    nonzero to restart on the thread's kernel stack (entry from
 *                 a lower EL or after a stack fault); zero to continue on the
 *                 current sp_el0 stack.
 * double_fault:   nonzero when re-entered from the SPx sync vector, i.e. the
 *                 iframe push/pop itself faulted.
 *
 * On exit: x0 == sp == base of the new iframe; spsel is 0 (EL1t) and fp
 * points at the saved fp/elr pair so stack traces link through the frame.
 */
.macro regsave, regsave_offset:req, reset_stack:req, double_fault=0
.if \double_fault
    /*
     * Use elr of original fault instead of new fault to get a more useful
     * stack trace. Report new elr as lr instead.
     */
    mrs     lr, elr_el1
.else
    /* Save x0 and x1 in sp_el1 scratch space so we have some free registers */
    stp     x0, x1, [sp]
    mrs     x1, elr_el1 /* Save elr_el1 early in x1 in case we fault again */
.endif

    /* x0 = new sp_el0. Instructions following regsave may rely on this property. */
.if \reset_stack
    /* Reload the kernel stack from the per-thread struct pointed to by
     * tpidr_el1; field at offset 8 is presumably the kernel stack top —
     * confirm against the thread_t layout (offset 24 below is documented
     * as thread_t.shadow_stack). */
    mrs     x0, tpidr_el1
    ldr     x0, [x0, 8]
    bic     x0, x0, #0xf /* align stack */
.else
    mrs     x0, sp_el0
.endif

.if \regsave_offset == regsave_long_offset
    /* prealloc=32 leaves room for the sp/spsr/fp/elr tail stored below. */
    push    x28, lr, rsp=x0, prealloc=32
    push    x26, x27, rsp=x0
    push    x24, x25, rsp=x0
    push    x22, x23, rsp=x0
    push    x20, x21, rsp=x0
    push    x18, x19, rsp=x0
.elseif \regsave_offset == regsave_short_offset
    push    x18, lr, rsp=x0, prealloc=32
.else
    .error  "regsave: unsupported regsave_offset, \regsave_offset"
.endif

    push    x16, x17, rsp=x0
    push    x14, x15, rsp=x0
    push    x12, x13, rsp=x0
    push    x10, x11, rsp=x0
    push    x8, x9, rsp=x0
    push    x6, x7, rsp=x0
    push    x4, x5, rsp=x0
    push    x2, x3, rsp=x0

    /*
     * Push entry values of x0 and x1 saved in sp_el1 scratch space. If we fault
     * again below this point, the new iframe will not have the correct values
     * for x2 and x3, but we should still be able to get a stack trace. spsr
     * will also be wrong for double faults as the code below will read the new
     * spsr not the one from the original exception.
     */
    ldp     x2, x3, [sp]
    push    x2, x3, rsp=x0

    /* Store the iframe tail: sp_el0, spsr, fp and elr (elr was stashed in x1). */
    mrs     x2, sp_el0
    mrs     x3, spsr_el1
    stp     x2, x3, [x0, #\regsave_offset]
    stp     fp, x1, [x0, #\regsave_offset + 16]
    /* Switch to EL1t so sp refers to sp_el0 again. */
    msr     spsel, #0
    /* instructions following regsave may rely on x0 holding sp to save space */
    mov     sp, x0
    /* Point fp at the saved fp/elr pair so unwinders see a normal frame. */
    add     fp, sp, #\regsave_offset + 16
#if KERNEL_SCS_ENABLED
.if \reset_stack
    /* Shadow call stack: reload x18 from the thread struct. */
    mrs     x18, tpidr_el1
    ldr     x18, [x18, 24] /* thread_t.shadow_stack */
.endif
#endif
.endm
102
/* Save a full iframe (x0-x28, lr, sp, spsr, fp, elr). */
.macro regsave_long, reset_stack, double_fault=0
    regsave regsave_offset=regsave_long_offset, reset_stack=\reset_stack, \
            double_fault=\double_fault
.endm

/* Save a caller-saved-only iframe (x0-x17, lr, sp, spsr, fp, elr); enough
 * for paths that return through C code honoring the AAPCS64 ABI. */
.macro regsave_short, reset_stack
    regsave regsave_offset=regsave_short_offset, reset_stack=\reset_stack
.endm
111
/*
 * regrestore - unwind an iframe built by regsave and prepare for eret.
 *
 * regsave_offset: must match the value passed to the corresponding regsave
 *                 (regsave_long_offset or regsave_short_offset).
 *
 * Expects sp (EL1t view, i.e. sp_el0) to point at the iframe base. Restores
 * sp_el0, elr_el1 and spsr_el1, then all GPRs, using lr as the frame cursor
 * until the final ldp reloads it.
 */
.macro regrestore, regsave_offset:req
    mov     lr, sp

    /*
     * Switch to EL1h so a fault after restoring sp_el0 to a user-space stack
     * pointer does not cause the kernel fault handler to run on the user-space
     * stack.
     */
    msr     spsel, #1

    /* Reload the iframe tail: sp_el0, spsr, fp, elr. */
    ldp     x0, x2, [lr, #\regsave_offset]
    ldp     fp, x1, [lr, #\regsave_offset + 16]
    msr     sp_el0, x0
    msr     elr_el1, x1
    msr     spsr_el1, x2
    pop     x0, x1, rsp=lr
    pop     x2, x3, rsp=lr
    pop     x4, x5, rsp=lr
    pop     x6, x7, rsp=lr
    pop     x8, x9, rsp=lr
    pop     x10, x11, rsp=lr
    pop     x12, x13, rsp=lr
    pop     x14, x15, rsp=lr
    pop     x16, x17, rsp=lr
.if \regsave_offset == regsave_long_offset
    pop     x18, x19, rsp=lr
    pop     x20, x21, rsp=lr
    pop     x22, x23, rsp=lr
    pop     x24, x25, rsp=lr
    pop     x26, x27, rsp=lr
    ldp     x28, lr, [lr]
.elseif \regsave_offset == regsave_short_offset
    ldp     x18, lr, [lr]
.else
    /* Fixed copy-paste: diagnostic previously blamed "regsave". */
    .error  "regrestore: unsupported regsave_offset, \regsave_offset"
.endif
.endm
149
/* Restore a full iframe saved by regsave_long. */
.macro regrestore_long
    regrestore regsave_offset=regsave_long_offset
.endm

/* Restore a caller-saved-only iframe saved by regsave_short. */
.macro regrestore_short
    regrestore regsave_offset=regsave_short_offset
.endm
157
/*
 * invalid_exception - save full state and hand off to the C handler for
 * exceptions that should never happen. \which is an arbitrary identifier
 * passed to arm64_invalid_exception in x1 so the panic message can say
 * which vector fired.
 */
.macro invalid_exception, which, reset_stack, double_fault=0
    regsave_long reset_stack=\reset_stack, double_fault=\double_fault
    /* x0 holds sp after regsave */
    mov x1, #\which
    bl  arm64_invalid_exception
.if \reset_stack == 0 /* do we have room for the following instruction? */
    /* Each vector slot is 0x80 bytes; only the shorter reset_stack=0
     * expansion leaves space for this trap instruction. */
    b . /* unreachable: arm64_invalid_exception calls _panic */
.endif
.endm
167
/*
 * irq_exception - short-iframe IRQ path: dispatch to platform_irq, then
 * optionally preempt (platform_irq's return value in x0 is nonzero when a
 * reschedule is requested) before returning through the shared restore tail.
 */
.macro irq_exception, reset_stack
    regsave_short reset_stack=\reset_stack
#if !ARM_MERGE_FIQ_IRQ
    msr daifclr, #1 /* reenable fiqs once elr and spsr have been saved */
#endif
    /* x0 holds sp after regsave */
    bl  platform_irq
    /* \@ is the assembler's macro-expansion counter, making the label
     * unique per expansion of this macro. */
    cbz x0, .Lirq_exception_no_preempt\@
    bl  thread_preempt
.Lirq_exception_no_preempt\@:
#if !ARM_MERGE_FIQ_IRQ
    msr daifset, #1 /* disable fiqs to protect elr and spsr restore */
#endif
    b   arm64_exc_shared_restore_short
.endm
183
/*
 * fiq_exception - FIQ path. When FIQs and IRQs are merged it is identical
 * to the IRQ path; otherwise dispatch to platform_fiq with FIQs left masked.
 */
.macro fiq_exception, reset_stack
#if ARM_MERGE_FIQ_IRQ
    irq_exception reset_stack=\reset_stack
#else
    regsave_short reset_stack=\reset_stack
    /* x0 holds sp after regsave */
    bl  platform_fiq
    b  arm64_exc_shared_restore_short
#endif
.endm
194
/*
 * ARMv8-A exception vector table. Each vector slot is 0x80 bytes, laid out
 * in four groups of four (sync/irq/fiq/serror); the .org directives pin each
 * entry to its architectural offset from arm64_exception_base.
 */
FUNCTION(arm64_exception_base)

/* exceptions from current EL, using SP0 */
LOCAL_FUNCTION(arm64_sync_exc_current_el_SP0)
    regsave_long reset_stack=0
    /* x0 holds sp after regsave */
    mov x1, #0 /* from_lower = false */
    b arm64_sync_exc_shared

.org 0x080
LOCAL_FUNCTION(arm64_irq_current_el_SP0)
    irq_exception reset_stack=0

.org 0x100
LOCAL_FUNCTION(arm64_fiq_current_el_SP0)
    fiq_exception reset_stack=0

.org 0x180
LOCAL_FUNCTION(arm64_err_exc_current_el_SP0)
    invalid_exception 3, reset_stack=0
215
/* exceptions from current EL, using SPx */
.org 0x200
LOCAL_FUNCTION(arm64_sync_exc_current_el_SPx)
    /*
     * This fault will occur if the kernel stack pointer is not valid when
     * we try to push or pop the iframe. Reset the stack pointer in this case
     * so panic can run. This should work for faults caused by stack overflows,
     * but not if the stack in the thread struct is also bad.
     */
    invalid_exception 0x10, reset_stack=1, double_fault=1

.org 0x280
LOCAL_FUNCTION(arm64_irq_current_el_SPx)
    invalid_exception 0x11, reset_stack=0

.org 0x300
LOCAL_FUNCTION(arm64_fiq_current_el_SPx)
    invalid_exception 0x12, reset_stack=0

.org 0x380
LOCAL_FUNCTION(arm64_err_exc_current_el_SPx)
    invalid_exception 0x13, reset_stack=0
238
/* exceptions from lower EL, running arm64 */
.org 0x400
LOCAL_FUNCTION(arm64_sync_exc_lower_el_64)
    /*
     * Reset the stack pointer for this and all other exceptions from user-space
     * since the kernel stack should be empty when user-space is running, and we
     * don't want to use the user-space stack, which is the current stack
     * pointer, while in the kernel. The user-space stack pointer will be saved
     * on entry and restored on return.
     */
    regsave_long reset_stack=1
    /* x0 holds sp after regsave */
    mov x1, #1 /* from_lower = true */
    b arm64_sync_exc_shared

.org 0x480
LOCAL_FUNCTION(arm64_irq_lower_el_64)
    irq_exception reset_stack=1

.org 0x500
LOCAL_FUNCTION(arm64_fiq_lower_el_64)
    fiq_exception reset_stack=1

.org 0x580
LOCAL_FUNCTION(arm64_err_exc_lower_el_64)
    invalid_exception 0x23, reset_stack=1
265
/* exceptions from lower EL, running arm32 */
.org 0x600
LOCAL_FUNCTION(arm64_sync_exc_lower_el_32)
    /* Same handling as the AArch64 lower-EL sync vector: restart on the
     * thread's kernel stack and funnel into the shared C dispatch. */
    regsave_long reset_stack=1
    /* x0 holds sp after regsave */
    mov x1, #1 /* from_lower = true */
    b arm64_sync_exc_shared

.org 0x680
LOCAL_FUNCTION(arm64_irq_lower_el_32)
    irq_exception reset_stack=1

.org 0x700
LOCAL_FUNCTION(arm64_fiq_lower_el_32)
    fiq_exception reset_stack=1

.org 0x780
LOCAL_FUNCTION(arm64_err_exc_lower_el_32)
    invalid_exception 0x33, reset_stack=1
285
/* Common sync-exception tail: C dispatch, full iframe restore, return. */
LOCAL_FUNCTION(arm64_sync_exc_shared)
    bl arm64_sync_exception /* x0 and x1 must be set by caller */
    regrestore_long
    eret

/* Common IRQ/FIQ tail: short iframe restore, return. */
LOCAL_FUNCTION(arm64_exc_shared_restore_short)
    regrestore_short
    eret
294
295/*
296 * void arm64_enter_uspace(ulong arg0, vaddr_t entry_point,
297 *                         vaddr_t user_stack_top, vaddr_t shadow_stack_base,
298 *                         uint64_t spsr)
299 */
300FUNCTION(arm64_enter_uspace)
301    /*
302     * Put input registers (x1-x4) into their destination registers before
303     * zeroing them. Register x0 already contains the desired value (arg0).
304     */
305    mov    x13, x2      /* AArch32 SP_usr = user_stack_top */
306    msr    spsel, #1    /* Switch to EL1h before setting a user-space sp */
307    msr    sp_el0, x2   /* AArch64 SP_usr = user_stack_top  */
308    mov    x18, x3      /* AArch64 shadow stack = shadow_stack_base */
309    msr    elr_el1, x1  /* entry_point */
310    msr    spsr_el1, x4 /* spsr */
311
312    /* zero remaining registers */
313    mov    x1, xzr
314    mov    x2, xzr
315    mov    x3, xzr
316    mov    x4, xzr
317    mov    x5, xzr
318    mov    x6, xzr
319    mov    x7, xzr
320    mov    x8, xzr
321    mov    x9, xzr
322    mov    x10, xzr
323    mov    x11, xzr
324    mov    x12, xzr
325    mov    x14, xzr     /* AArch32 LR_usr */
326    mov    x15, xzr
327    mov    x16, xzr
328    mov    x17, xzr
329    mov    x19, xzr
330    mov    x20, xzr
331    mov    x21, xzr
332    mov    x22, xzr
333    mov    x23, xzr
334    mov    x24, xzr
335    mov    x25, xzr
336    mov    x26, xzr
337    mov    x27, xzr
338    mov    x28, xzr
339    mov    x29, xzr
340    mov    x30, xzr     /* AArch64 LR_usr */
341
342    eret
343