/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 */

#include <rtconfig.h>

#ifdef RT_USING_VMM
#include <vmm.h>
.equ orig_irq_isr,    LINUX_VECTOR_POS+0x18
#else
#undef RT_VMM_USING_DOMAIN
#endif

.equ Mode_USR,        0x10
.equ Mode_FIQ,        0x11
.equ Mode_IRQ,        0x12
.equ Mode_SVC,        0x13
.equ Mode_ABT,        0x17
.equ Mode_UND,        0x1B
.equ Mode_SYS,        0x1F

.equ I_Bit,           0x80            @ when I bit is set, IRQ is disabled
.equ F_Bit,           0x40            @ when F bit is set, FIQ is disabled
#ifndef RT_USING_VMM
.equ UND_Stack_Size,     0x00000000
.equ SVC_Stack_Size,     0x00000100
.equ ABT_Stack_Size,     0x00000000
.equ RT_FIQ_STACK_PGSZ,  0x00000000
.equ RT_IRQ_STACK_PGSZ,  0x00000100
.equ USR_Stack_Size,     0x00000100

#define ISR_Stack_Size  (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
                 RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#else
#define ISR_Stack_Size  (RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#endif
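
/*
 * Rough sketch of the ISR stack region reserved below (sizes as set
 * above; several are zero in this configuration). stack_setup carves
 * it from stack_top downward, one sub-stack per exception mode, since
 * each ARM mode has its own banked SP:
 *
 *   stack_top ->  | UND | ABT | FIQ | IRQ |  <- stack_start
 *
 * In the non-VMM build the SVC startup stack also starts at stack_top;
 * it is only used during early startup, before threads are scheduled.
 */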

.section .data.share.isr
/* stack */
.globl stack_start
.globl stack_top

stack_start:
.rept ISR_Stack_Size
.byte 0
.endr
stack_top:

.text
/* reset entry */
.globl _reset
_reset:
#ifdef RT_USING_VMM
    /* save the parameter and variable registers of the caller */
    stmfd   sp!, {r0-r12, lr}
#endif
    /* set the cpu to SVC32 mode and disable interrupts */
    mrs     r0, cpsr
    bic     r0, r0, #0x1f
    orr     r0, r0, #Mode_SVC|I_Bit|F_Bit
    msr     cpsr_c, r0

    /* setup stack */
    bl      stack_setup

    /* clear .bss */
    mov     r0,#0                   /* get a zero                       */
    ldr     r1,=__bss_start         /* bss start                        */
    ldr     r2,=__bss_end           /* bss end                          */

bss_loop:
    cmp     r1,r2                   /* check if data to clear           */
    strlo   r0,[r1],#4              /* clear 4 bytes                    */
    blo     bss_loop                /* loop until done                  */

#ifdef RT_USING_VMM
    /* clear .bss.share */
    mov     r0,#0                   /* get a zero                       */
    ldr     r1,=__bss_share_start   /* bss start                        */
    ldr     r2,=__bss_share_end     /* bss end                          */

bss_share_loop:
    cmp     r1,r2                   /* check if data to clear           */
    strlo   r0,[r1],#4              /* clear 4 bytes                    */
    blo     bss_share_loop          /* loop until done                  */
#endif

    /* call C++ constructors of global objects                          */
    ldr     r0, =__ctors_start__
    ldr     r1, =__ctors_end__

ctor_loop:
    cmp     r0, r1
    beq     ctor_end
    ldr     r2, [r0], #4            @ load the next constructor address
    stmfd   sp!, {r0-r1}            @ r0/r1 may be clobbered by the call
    mov     lr, pc                  @ return address = the insn after bx
    bx      r2                      @ call the constructor
    ldmfd   sp!, {r0-r1}
    b       ctor_loop
ctor_end:

    /* start RT-Thread Kernel */
#ifdef RT_USING_VMM
    /* restore the parameters */
    ldmfd   sp!, {r0-r3}
    bl      vmm_entry
    ldmfd   sp!, {r4-r12, pc}
#else
    ldr     pc, _rtthread_startup
_rtthread_startup:
    .word rtthread_startup
#endif
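
/*
 * Note: in the VMM build _reset is entered as a function call from the
 * host kernel; the registers pushed at entry are popped above, so the
 * final ldmfd returns to the caller once vmm_entry() is done. In the
 * bare-metal build the jump goes through a literal word, so
 * rtthread_startup may be linked anywhere in the address space.
 */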

stack_setup:
    ldr     r0, =stack_top
#ifdef RT_USING_VMM
    @ Linux uses stmia to save r0, lr and spsr. To keep the stack aligned
    @ to an 8 byte boundary, just reserve 16 bytes for it.
    sub     r0, r0, #16
#endif

#ifndef RT_USING_VMM
    @  Set the startup stack for svc
    mov     sp, r0

    @  Enter Undefined Instruction Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_UND|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #UND_Stack_Size

    @  Enter Abort Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_ABT|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #ABT_Stack_Size
#endif

    @  Enter FIQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_FIQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_FIQ_STACK_PGSZ

    @  Enter IRQ Mode and set its Stack Pointer
    msr     cpsr_c, #Mode_IRQ|I_Bit|F_Bit
    mov     sp, r0
    sub     r0, r0, #RT_IRQ_STACK_PGSZ

    /* come back to SVC mode */
    msr     cpsr_c, #Mode_SVC|I_Bit|F_Bit
    bx      lr

/* exception handlers: undef, swi, pabt, dabt, resv, irq, fiq          */
.section .text.isr, "ax"
    .align  5
.globl vector_fiq
vector_fiq:
    stmfd   sp!,{r0-r7,lr}
    bl      rt_hw_trap_fiq
    ldmfd   sp!,{r0-r7,lr}
    subs    pc, lr, #4
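
/* For IRQ/FIQ exceptions lr points one word past the preferred return
 * address, hence "subs pc, lr, #4"; with pc as the destination, the "s"
 * suffix also restores CPSR from SPSR in the same instruction. */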

.globl      rt_interrupt_enter
.globl      rt_interrupt_leave
.globl      rt_thread_switch_interrupt_flag
.globl      rt_interrupt_from_thread
.globl      rt_interrupt_to_thread

.globl      rt_current_thread
.globl      vmm_thread
.globl      vmm_virq_check

    .align  5
.globl vector_irq
vector_irq:
    stmfd   sp!, {r0-r12,lr}

#ifdef RT_VMM_USING_DOMAIN
    @ save the current domain
    mrc     p15, 0, r5, c3, c0
    @ switch to the vmm domain as we are going to run vmm code
    ldr     r1, =vmm_domain_val
    ldr     r4, [r1]
    mcr     p15, 0, r4, c3, c0
#endif
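
/* p15 c3 c0 is the Domain Access Control Register (DACR). The VMM keeps
 * guest and vmm mappings in different domains, so the handler switches
 * domains before touching data on either side. */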

    bl      rt_interrupt_enter
    bl      rt_hw_trap_irq
    bl      rt_interrupt_leave

#ifdef RT_VMM_USING_DOMAIN
    @ Restore the previous domain. This does some redundant work but
    @ simplifies the logic. The interrupted context may be in the guest
    @ domain, so rt_thread_switch_interrupt_flag has to live in .bss.share.
    mcr     p15, 0, r5, c3, c0
#endif

    @ if rt_thread_switch_interrupt_flag is set, jump to
    @ rt_hw_context_switch_interrupt_do and don't return
    ldr     r0, =rt_thread_switch_interrupt_flag
    ldr     r1, [r0]
    cmp     r1, #1
    beq     rt_hw_context_switch_interrupt_do

#ifndef RT_USING_VMM
    ldmfd   sp!, {r0-r12,lr}
    subs    pc,  lr, #4
#else
#ifdef RT_VMM_USING_DOMAIN
    @ r4 still holds vmm_domain_val
    @ back to the vmm domain as we need to access rt_current_thread
    mcr     p15, 0, r4, c3, c0
#endif
    /* check whether we need to do IRQ routing to the guest; interrupts
     * must be disabled here, or this could loop forever. */
    ldr     r0, =rt_current_thread
    ldr     r0, [r0]
    ldr     r1, =vmm_thread
    cmp     r0, r1
    beq     switch_to_guest

#ifdef RT_VMM_USING_DOMAIN
    @ r5 holds the domain of the interrupted context; it might be either
    @ super_domain_val or vmm_domain_val, so restore it.
    mcr     p15, 0, r5, c3, c0
#endif
    @ return directly if the interrupted thread is not the vmm thread
    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ We are going to execute RT-Thread code that accesses the content of
    @ the guest, so switch to the super domain.
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0
#endif
    /* check whether there is a pending interrupt for the Guest OS */
    bl      vmm_virq_check

#ifdef RT_VMM_USING_DOMAIN
    @ All done, restore the guest domain.
    mcr     p15, 0, r5, c3, c0
#endif

    cmp     r0, #0x0
    beq     route_irq_to_guest

    ldmfd   sp!, {r0-r12,lr}
    subs    pc, lr, #4

route_irq_to_guest:
    ldmfd   sp!, {r0-r12,lr}
    b       orig_irq_isr
#endif /* RT_USING_VMM */
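
/*
 * Deferred context switch requested from interrupt context:
 * rt_hw_context_switch_interrupt() only sets the flag and records the
 * from/to threads, and the routine below performs the actual switch on
 * the way out of the IRQ handler.
 */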

rt_hw_context_switch_interrupt_do:
    mov     r1,  #0         @ clear the flag
    str     r1,  [r0]

    mov     r1, sp          @ r1 points to {r0-r3} in the stack
    add     sp, sp, #4*4
    ldmfd   sp!, {r4-r12,lr}@ reload saved registers
    mrs     r0,  spsr       @ get cpsr of the interrupted thread
    sub     r2,  lr, #4     @ save old task's pc to r2

    @ Switch to SVC mode with interrupts disabled. If the usr-mode guest
    @ was interrupted, this just switches to the kernel-space stack, so
    @ saving the registers below won't trigger a data abort.
    msr     cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd   sp!, {r2}       @ push old task's pc
    stmfd   sp!, {r4-r12,lr}@ push old task's lr, r12-r4
    ldmfd   r1,  {r1-r4}    @ restore r0-r3 of the interrupted thread
    stmfd   sp!, {r1-r4}    @ push old task's r0-r3
    stmfd   sp!, {r0}       @ push old task's cpsr

    ldr     r4,  =rt_interrupt_from_thread
    ldr     r5,  [r4]
    str     sp,  [r5]       @ store sp in the preempted task's TCB

#ifdef RT_VMM_USING_DOMAIN
    @ A thread woken up by an interrupt must be an RT-Thread thread,
    @ so make sure the domain is correct.
    ldr     r1, =vmm_domain_val
    ldr     r2, [r1]
    mcr     p15, 0, r2, c3, c0
#endif
    ldr     r6,  =rt_interrupt_to_thread
    ldr     r6,  [r6]
    ldr     sp,  [r6]       @ get the new task's stack pointer

    ldmfd   sp!, {r4}       @ pop the new task's cpsr to spsr
    msr     spsr_cxsf, r4

    ldmfd   sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12, lr & pc, copy spsr to cpsr
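
/*
 * Frame layout built above on the preempted thread's stack, from low to
 * high address: cpsr, r0-r3, r4-r12, lr, pc. The final "ldmfd ...^" pops
 * the same frame for the incoming thread; the trailing ^ copies SPSR back
 * into CPSR so the thread resumes with its original processor state.
 */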

.macro push_svc_reg
    sub     sp, sp, #17 * 4         @/* Sizeof(struct rt_hw_exp_stack)  */
    stmia   sp, {r0 - r12}          @/* Calling r0-r12                  */
    mov     r0, sp
    mrs     r6, spsr                @/* Save CPSR                       */
    str     lr, [r0, #15*4]         @/* Push PC                         */
    str     r6, [r0, #16*4]         @/* Push CPSR                       */
    cps     #Mode_SVC
    str     sp, [r0, #13*4]         @/* Save calling SP                 */
    str     lr, [r0, #14*4]         @/* Save calling PC                 */
.endm
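
/*
 * The 17-word frame built by push_svc_reg matches struct rt_hw_exp_stack
 * as seen by the C trap handlers below:
 *   words 0-12 : r0-r12 at the time of the exception
 *   word  13   : SVC-mode sp        word 14 : SVC-mode lr
 *   word  15   : pc (exception lr)  word 16 : cpsr (exception spsr)
 * r0 points to this frame when each rt_hw_trap_* handler is entered.
 */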

    .align  5
    .globl  vector_swi
vector_swi:
    push_svc_reg
    bl      rt_hw_trap_swi
    b       .

    .align  5
    .globl  vector_undef
vector_undef:
    push_svc_reg
    bl      rt_hw_trap_undef
    b       .

    .align  5
    .globl  vector_pabt
vector_pabt:
    push_svc_reg
    bl      rt_hw_trap_pabt
    b       .

    .align  5
    .globl  vector_dabt
vector_dabt:
    push_svc_reg
    bl      rt_hw_trap_dabt
    b       .

    .align  5
    .globl  vector_resv
vector_resv:
    push_svc_reg
    bl      rt_hw_trap_resv
    b       .
353