/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 */

#include <rtconfig.h>

#ifdef RT_USING_VMM
#include <vmm.h>
.equ orig_irq_isr,    LINUX_VECTOR_POS+0x18
#else
#undef RT_VMM_USING_DOMAIN
#endif

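@ ARM processor mode values for the CPSR M[4:0] field.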
.equ Mode_USR,        0x10
.equ Mode_FIQ,        0x11
.equ Mode_IRQ,        0x12
.equ Mode_SVC,        0x13
.equ Mode_ABT,        0x17
.equ Mode_UND,        0x1B
.equ Mode_SYS,        0x1F

.equ I_Bit,           0x80            @ when I bit is set, IRQ is disabled
.equ F_Bit,           0x40            @ when F bit is set, FIQ is disabled

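@ Per-mode stack sizes for non-VMM builds. In VMM builds, RT_FIQ_STACK_PGSZ
@ and RT_IRQ_STACK_PGSZ are expected to be provided by vmm.h instead.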
#ifndef RT_USING_VMM
.equ UND_Stack_Size,     0x00000000
.equ SVC_Stack_Size,     0x00000100
.equ ABT_Stack_Size,     0x00000000
.equ RT_FIQ_STACK_PGSZ,  0x00000000
.equ RT_IRQ_STACK_PGSZ,  0x00000100
.equ USR_Stack_Size,     0x00000100

#define ISR_Stack_Size  (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
                 RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#else
#define ISR_Stack_Size  (RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#endif

.section .data.share.isr
/* stack space for the exception/interrupt modes (carved up in stack_setup) */
.globl stack_start
.globl stack_top

stack_start:
.rept ISR_Stack_Size
.byte 0
.endr
stack_top:

.text
/* reset entry */
.globl _reset
_reset:
#ifdef RT_USING_VMM
    /* save all the parameter and variable registers */
    stmfd   sp!, {r0-r12, lr}
#endif
    /* set the cpu to SVC32 mode and disable interrupts */
    mrs     r0, cpsr
    bic     r0, r0, #0x1f           @ clear the mode field
    orr     r0, r0, #0x13           @ Mode_SVC
    msr     cpsr_c, r0

    /* setup stack */
    bl      stack_setup

    /* clear .bss */
    mov     r0,#0                   /* get a zero                       */
    ldr     r1,=__bss_start         /* bss start                        */
    ldr     r2,=__bss_end           /* bss end                          */

bss_loop:
    cmp     r1,r2                   /* check if data to clear           */
    strlo   r0,[r1],#4              /* clear 4 bytes                    */
    blo     bss_loop                /* loop until done                  */

#ifdef RT_USING_VMM
    /* clear .bss.share */
    mov     r0,#0                   /* get a zero                       */
    ldr     r1,=__bss_share_start   /* bss.share start                  */
    ldr     r2,=__bss_share_end     /* bss.share end                    */

bss_share_loop:
    cmp     r1,r2                   /* check if data to clear           */
    strlo   r0,[r1],#4              /* clear 4 bytes                    */
    blo     bss_share_loop          /* loop until done                  */
#endif

    /* call C++ constructors of global objects                          */
    ldr     r0, =__ctors_start__
    ldr     r1, =__ctors_end__

ctor_loop:
    cmp     r0, r1
    beq     ctor_end
    ldr     r2, [r0], #4            @ load next constructor address
    stmfd   sp!, {r0-r1}            @ preserve the loop cursors across the call
    mov     lr, pc                  @ lr = address of the ldmfd below (pc reads as .+8)
    bx      r2                      @ call the constructor
    ldmfd   sp!, {r0-r1}
    b       ctor_loop
ctor_end:

    /* start RT-Thread Kernel */
#ifdef RT_USING_VMM
    /* restore the parameters */
    ldmfd   sp!, {r0-r3}
    bl      vmm_entry
    ldmfd   sp!, {r4-r12, pc}
#else
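    @ Jump via a literal so the target is the absolute linked address of
    @ rtthread_startup, which presumably keeps the jump correct even if
    @ _reset executes from a different address than it was linked at.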
    ldr     pc, _rtthread_startup
_rtthread_startup:
    .word rtthread_startup
#endif
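@ stack_setup: carve the per-mode exception stacks out of the region between
@ stack_start and stack_top. r0 walks downward from stack_top; each mode
@ banks its own sp, then r0 is lowered by that mode's stack size.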
stack_setup:
    ldr     r0, =stack_top
#ifdef RT_USING_VMM
    @ Linux uses stmia to save r0, lr and spsr. To align to an 8-byte
    @ boundary, just allocate 16 bytes for it.
    sub     r0, r0, #16
#endif

#ifndef RT_USING_VMM
    @  Set the startup stack for svc
    mov     sp, r0
#endif

#ifndef RT_USING_VMM
    @  Enter Undefined Instruction Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #UND_Stack_Size

    @  Enter Abort Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #ABT_Stack_Size
#endif

    @  Enter FIQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_FIQ_STACK_PGSZ

    @  Enter IRQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_IRQ_STACK_PGSZ

    /* come back to SVC mode */
    msr     cpsr_c, #Mode_SVC|I_Bit|F_Bit
    bx      lr

/* exception handlers: undef, swi, pabt, dabt, resv, irq, fiq          */
.section .text.isr, "ax"
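@ The IRQ/FIQ handlers below return with "subs pc, lr, #4": on these
@ exceptions lr holds the interrupted instruction address + 4, and the
@ S-suffixed write to pc also restores cpsr from the banked spsr.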
    .align  5
.globl vector_fiq
vector_fiq:
    stmfd   sp!,{r0-r7,lr}
    bl      rt_hw_trap_fiq
    ldmfd   sp!,{r0-r7,lr}
    subs    pc, lr, #4

.globl      rt_interrupt_enter
.globl      rt_interrupt_leave
.globl      rt_thread_switch_interrupt_flag
.globl      rt_interrupt_from_thread
.globl      rt_interrupt_to_thread

.globl      rt_current_thread
.globl      vmm_thread
.globl      vmm_virq_check

    .align  5
.globl vector_irq
vector_irq:
    stmfd   sp!, {r0-r12,lr}
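    @ The domain juggling below programs the ARM Domain Access Control
    @ Register (CP15 c3, DACR) so that VMM code, guest memory and RT-Thread
    @ data are accessible at each step; r5 keeps the interrupted DACR value.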
#ifdef RT_VMM_USING_DOMAIN
    @ save the last domain
    mrc     p15, 0, r5, c3, c0
    @ switch to the vmm domain as we are going to run VMM code
    ldr     r1, =vmm_domain_val
    ldr     r4, [r1]
    mcr     p15, 0, r4, c3, c0
#endif

    bl      rt_interrupt_enter
    bl      rt_hw_trap_irq
    bl      rt_interrupt_leave

#ifdef RT_VMM_USING_DOMAIN
    @ Restore the last domain. This does some redundant work but keeps the
    @ logic simple. The interrupted domain might be the guest domain, so
    @ rt_thread_switch_interrupt_flag has to live in .bss.share.
    mcr     p15, 0, r5, c3, c0
#endif

    @ if rt_thread_switch_interrupt_flag is set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    ldr     r0, =rt_thread_switch_interrupt_flag
    ldr     r1, [r0]
    cmp     r1, #1
    beq     rt_hw_context_switch_interrupt_do

#ifndef RT_USING_VMM
    ldmfd   sp!, {r0-r12,lr}
    subs    pc,  lr, #4
#else
#ifdef RT_VMM_USING_DOMAIN
    @ r4 is vmm_domain_val
    @ back to the vmm domain as we need to access rt_current_thread
    mcr     p15, 0, r4, c3, c0
#endif
    /* Check whether we need to do IRQ routing. Make sure interrupts are
     * disabled here, or there will be an infinite loop. */
    ldr     r0, =rt_current_thread
    ldr     r0, [r0]
    ldr     r1, =vmm_thread
    cmp     r0, r1
    beq     switch_to_guest

#ifdef RT_VMM_USING_DOMAIN
    @ r5 is the domain of the interrupted context;
    @ it might be super_domain_val or vmm_domain_val so we need to restore it.
    mcr     p15, 0, r5, c3, c0
#endif
    @ switch back if the interrupted thread is not vmm
    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ We are going to execute rt-thread code but access the content of the
    @ guest, so switch to the super domain.
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0
#endif
    /* check whether there is a pending interrupt for the Guest OS */
    bl      vmm_virq_check

#ifdef RT_VMM_USING_DOMAIN
    @ All done, restore the guest domain.
    mcr     p15, 0, r5, c3, c0
#endif

    cmp     r0, #0x0
    beq     route_irq_to_guest

    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4

route_irq_to_guest:
    ldmfd   sp!, {r0-r12,lr}
    b       orig_irq_isr
#endif /* RT_USING_VMM */
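@ Entered from vector_irq with r0 = &rt_thread_switch_interrupt_flag and the
@ IRQ-mode stack still holding the interrupted thread's {r0-r12, lr}. Builds
@ the thread context frame on the SVC stack, saves sp into the preempted
@ thread's slot, then restores the next thread's context.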
rt_hw_context_switch_interrupt_do:
    mov     r1,  #0         @ clear flag
    str     r1,  [r0]

    mov     r1, sp          @ r1 points to {r0-r3} in stack
    add     sp, sp, #4*4
    ldmfd   sp!, {r4-r12,lr}@ reload saved registers
    mrs     r0,  spsr       @ get cpsr of interrupt thread
    sub     r2,  lr, #4     @ save old task's pc to r2

    @ Switch to SVC mode with interrupts disabled. If a usr-mode guest was
    @ interrupted, this just switches to the kernel-space stack, so saving
    @ the registers in kernel space won't trigger a data abort.
    msr     cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd   sp!, {r2}       @ push old task's pc
    stmfd   sp!, {r4-r12,lr}@ push old task's lr,r12-r4
    ldmfd   r1,  {r1-r4}    @ restore r0-r3 of the interrupt thread
    stmfd   sp!, {r1-r4}    @ push old task's r0-r3
    stmfd   sp!, {r0}       @ push old task's cpsr

    ldr     r4,  =rt_interrupt_from_thread
    ldr     r5,  [r4]
    str     sp,  [r5]       @ store sp in preempted task's TCB

#ifdef RT_VMM_USING_DOMAIN
    @ If a thread is woken up by an interrupt, it should be an RT-Thread
    @ thread. Make sure the domain is correct.
    ldr     r1, =vmm_domain_val
    ldr     r2, [r1]
    mcr     p15, 0, r2, c3, c0
#endif
    ldr     r6,  =rt_interrupt_to_thread
    ldr     r6,  [r6]
    ldr     sp,  [r6]       @ get new task's stack pointer

    ldmfd   sp!, {r4}       @ pop new task's cpsr to spsr
    msr     spsr_cxsf, r4

    ldmfd   sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr

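@ push_svc_reg builds a 17-word exception frame, presumably matching
@ struct rt_hw_exp_stack: offsets 0-12 hold r0-r12, 13 the calling sp,
@ 14 the calling lr, 15 the exception pc and 16 the saved cpsr.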
.macro push_svc_reg
    sub     sp, sp, #17 * 4         @/* Sizeof(struct rt_hw_exp_stack)  */
    stmia   sp, {r0 - r12}          @/* Calling r0-r12                  */
    mov     r0, sp
    mrs     r6, spsr                @/* Save CPSR                       */
    str     lr, [r0, #15*4]         @/* Push PC                         */
    str     r6, [r0, #16*4]         @/* Push CPSR                       */
    cps     #Mode_SVC
    str     sp, [r0, #13*4]         @/* Save calling SP                 */
    str     lr, [r0, #14*4]         @/* Save calling PC                 */
.endm

    .align  5
    .globl  vector_swi
vector_swi:
    push_svc_reg
    bl      rt_hw_trap_swi
    b       .

    .align  5
    .globl  vector_undef
vector_undef:
    push_svc_reg
    bl      rt_hw_trap_undef
    b       .

    .align  5
    .globl  vector_pabt
vector_pabt:
    push_svc_reg
    bl      rt_hw_trap_pabt
    b       .

    .align  5
    .globl  vector_dabt
vector_dabt:
    push_svc_reg
    bl      rt_hw_trap_dabt
    b       .

    .align  5
    .globl  vector_resv
vector_resv:
    push_svc_reg
    bl      rt_hw_trap_resv
    b       .
353