/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <asm.h>
#include <arch/asm_macros.h>

/* void arm64_context_switch(vaddr_t *old_sp, vaddr_t new_sp); */
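/*
 * Saves the callee-saved registers (x19..x29 per the AAPCS64, plus
 * x18 and the EL0 thread pointer registers tpidr_el0/tpidrro_el0) and
 * lr on the old thread's stack, stores the resulting stack pointer
 * through old_sp, switches sp to new_sp, and restores the same frame
 * layout from the new stack; ret then resumes the new thread at the
 * lr it saved when it was last switched out.
 *
 * Sketch of a typical caller (the thread structure and its arch.sp
 * field are illustrative assumptions, not defined in this file):
 *
 *   arm64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
 */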
FUNCTION(arm64_context_switch)
    /* save old frame */
    push x28, x29
    push x26, x27
    push x24, x25
    push x22, x23
    push x20, x21
    push x18, x19
    /* save the EL0 thread pointer registers */
    mrs  x18, tpidr_el0
    mrs  x19, tpidrro_el0
    push x18, x19
    /* save lr; xzr fills the second slot to keep sp 16-byte aligned */
    push x30, xzr

    /* save old sp */
    mov  x15, sp
    str  x15, [x0]

    /* load new sp */
    mov  sp, x1

    /* restore new frame */
    pop  x30, xzr
    pop  x18, x19
    msr  tpidr_el0, x18
    msr  tpidrro_el0, x19
    pop  x18, x19
    pop  x20, x21
    pop  x22, x23
    pop  x24, x25
    pop  x26, x27
    pop  x28, x29

    ret
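/*
 * arm64_el3_to_el1: drop from EL3 straight to EL1h. Configures the
 * lower exception levels for AArch64, disables coprocessor/FPU traps,
 * points SP_EL1 at the current stack, then erets to .Ltarget, which
 * returns to the caller now running at EL1.
 */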
FUNCTION(arm64_el3_to_el1)
    /* set EL2 to 64bit (SCR_EL3.RW) */
    mrs x0, scr_el3
    orr x0, x0, #(1<<10)
    msr scr_el3, x0

    /* set EL1 to 64bit (HCR_EL2.RW) */
    mov x0, #(1<<31)
    msr hcr_el2, x0

    /* disable EL2 coprocessor traps (RES1 bits set, TFP clear) */
    mov x0, #0x33ff
    msr cptr_el2, x0

    /* disable EL1 FPU traps (CPACR_EL1.FPEN = 0b11) */
    mov x0, #(0b11<<20)
    msr cpacr_el1, x0

    /* run EL1 on the current stack (SP_EL1 = sp) */
    mov x0, sp
    msr sp_el1, x0

    adr x0, .Ltarget
    msr elr_el3, x0

    mov x0, #((0b1111 << 6) | (0b0101)) /* DAIF masked, M = EL1h */
    msr spsr_el3, x0
    isb

    eret
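/*
 * arm64_elX_to_el1: as above, but first checks CurrentEL and handles
 * entry from EL1 (no-op), EL2, or EL3. Typically invoked very early
 * in boot, e.g. "bl arm64_elX_to_el1" from the start code (the exact
 * call site is platform dependent and assumed here, not defined in
 * this file); the caller must already be running on a valid stack,
 * since sp is forwarded to SP_EL1.
 */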
FUNCTION(arm64_elX_to_el1)
    mrs x4, CurrentEL

    /* CurrentEL holds the exception level in bits [3:2] */
    cmp x4, #(0b01 << 2)
    bne .notEL1
    /* already in EL1 */
    ret

.notEL1:
    cmp x4, #(0b10 << 2)
    beq .inEL2

    /* in EL3: set EL2 to 64bit (SCR_EL3.RW) */
    mrs x4, scr_el3
    orr x4, x4, #(1<<10)
    msr scr_el3, x4

    adr x4, .Ltarget
    msr elr_el3, x4

    mov x4, #((0b1111 << 6) | (0b0101)) /* DAIF masked, M = EL1h */
    msr spsr_el3, x4
    b   .confEL1

.inEL2:
    adr x4, .Ltarget
    msr elr_el2, x4
    mov x4, #((0b1111 << 6) | (0b0101)) /* DAIF masked, M = EL1h */
    msr spsr_el2, x4

.confEL1:
    /* disable EL2 coprocessor traps (RES1 bits set, TFP clear) */
    mov x0, #0x33ff
    msr cptr_el2, x0

    /* set EL1 to 64bit (HCR_EL2.RW) */
    mov x0, #(1<<31)
    msr hcr_el2, x0

    /* disable EL1 FPU traps (CPACR_EL1.FPEN = 0b11) */
    mov x0, #(0b11<<20)
    msr cpacr_el1, x0

    /* run EL1 on the current stack (SP_EL1 = sp) */
    mov x0, sp
    msr sp_el1, x0

    isb
    eret

.Ltarget:
    /* both eret paths land here in EL1; lr still holds the caller's
       return address, so this returns to the caller at EL1 */
    ret

/* void platform_early_halt(void); */
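/* weak default; a platform can override this with its own halt hook */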
WEAK_FUNCTION(platform_early_halt)
    /* Disable interrupts and FIQs */
    msr daifset, #3

    /* Infinite loop */
    b   .