/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 */

#include <rtconfig.h>

#ifdef RT_USING_VMM
#include <vmm.h>
#endif

.section .text, "ax"
/*
 * rt_base_t rt_hw_interrupt_disable();
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs r0, cpsr            @ return the current CPSR as the interrupt level
    cpsid i                 @ mask IRQ
    bx  lr

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr cpsr, r0            @ restore the CPSR saved by rt_hw_interrupt_disable
    bx  lr

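/*
 * Typical usage of the pair above from C, as an illustrative sketch (the
 * variable name is hypothetical, not part of this file):
 *
 *     rt_base_t level = rt_hw_interrupt_disable();
 *     ... critical section ...
 *     rt_hw_interrupt_enable(level);
 */
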
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to (address of the 'to' thread's saved stack pointer)
 */
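/*
 * Stack frame popped below, from low address to high:
 *     cpsr, r0-r12, lr, pc
 * This is assumed to be the frame that this port's stack-init code (e.g.
 * rt_hw_stack_init) builds for a fresh thread; it matches the frame that
 * rt_hw_context_switch below pushes.
 */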
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr sp, [r0]            @ get new task stack pointer

    ldmfd sp!, {r4}         @ pop new task cpsr to spsr
    msr spsr_cxsf, r4

    ldmfd sp!, {r0-r12, lr, pc}^   @ pop new task r0-r12, lr & pc; ^ restores cpsr from spsr

.section .bss.share.isr
/* reentrancy level of the switch_to_guest path below */
_guest_switch_lvl:
    .word 0

.globl vmm_virq_update      @ provided elsewhere by the VMM code

.section .text.isr, "ax"
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}       @ push lr in place of pc: the address to resume at
    stmfd   sp!, {r0-r12, lr}   @ push lr & register file

    mrs r4, cpsr
    tst lr, #0x01
    orrne r4, r4, #0x20     @ caller runs in Thumb state: set the T bit in the saved cpsr

    stmfd sp!, {r4}         @ push cpsr; the frame now matches the layout popped on restore

    str sp, [r0]            @ store sp in the preempted task's TCB
    ldr sp, [r1]            @ get new task stack pointer

#ifdef RT_USING_VMM
#ifdef RT_VMM_USING_DOMAIN
    @ need to make sure we are in the vmm domain, as we are about to use
    @ rt_current_thread
    ldr     r2, =vmm_domain_val
    ldr     r7, [r2]
    mcr     p15, 0, r7, c3, c0      @ write DACR
#endif

    /* check whether this is the vmm thread; otherwise, update the vIRQ */
    ldr     r3, =rt_current_thread
    ldr     r4, [r3]
    ldr     r5, =vmm_thread
    cmp     r4, r5
    beq     switch_to_guest

    @ not falling into the guest: a simple task switch ;-)
    ldmfd sp!, {r6}         @ pop new task cpsr to spsr
    msr spsr_cxsf, r6
    ldmfd sp!, {r0-r12, lr, pc}^

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ the stack was saved in the guest domain, so we need to regain access
    @ to the guest's memory before we can read back the registers
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0      @ write DACR
#endif
    /* The user can do nearly anything in rt_thread_idle_excute because it
    calls thread->cleanup. One common thing is sending events that wake up
    other threads, in which case the guest thread will be preempted. This is
    the only point where the guest thread calls rt_hw_context_switch and
    "yields".

    Moreover, rt_schedule calls this function, so this function *can* be
    reentered. If that happens, we have to make sure that we do not run
    rt_thread_idle_excute and vmm_virq_update a second time and that we stay
    in the super domain. I use a "reference count" to achieve this
    behaviour. If you have a better idea, tell me. */
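    /* A C-equivalent sketch of the guard below (illustrative only; the
     * assembly that follows is the real implementation):
     *
     *     if (++_guest_switch_lvl == 1) {
     *         rt_thread_idle_excute();
     *         vmm_virq_update();
     *         _guest_switch_lvl--;   // only the outermost level cleans up
     *     }
     *     // fall through to _switch_through and restore the new context
     */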
    ldr     r4, =_guest_switch_lvl
    ldr     r5, [r4]
    add     r5, r5, #1
    str     r5, [r4]
    cmp     r5, #1
    bne     _switch_through

    bl      rt_thread_idle_excute
    bl      vmm_virq_update

    /* we need _guest_switch_lvl to protect us until _switch_through, but it
     * is OK to clean up the reference count here because the code below is
     * not reentrant. r4 and r5 are callee-saved, so they survive the bl
     * calls above. */
    sub     r5, r5, #1
    str     r5, [r4]

#ifdef RT_VMM_USING_DOMAIN
    ldr     r1, =guest_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0      @ write DACR
#endif
_switch_through:
#endif /* RT_USING_VMM */
    ldmfd sp!, {r4}         @ pop new task cpsr to spsr
    msr spsr_cxsf, r4
    ldmfd sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr

/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr r2, =rt_thread_switch_interrupt_flag
    ldr r3, [r2]
    cmp r3, #1
    beq _reswitch           @ a switch is already pending; just update the target
    ldr ip, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
    str r0, [ip]
    str r3, [r2]
_reswitch:
    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    str r1, [r2]
    bx  lr
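
/*
 * C-equivalent sketch of rt_hw_context_switch_interrupt (illustrative
 * only). The actual register save/restore is assumed to be performed by
 * the IRQ exit path of this port, which checks these variables:
 *
 *     if (rt_thread_switch_interrupt_flag != 1) {
 *         rt_thread_switch_interrupt_flag = 1;
 *         rt_interrupt_from_thread = from;
 *     }
 *     rt_interrupt_to_thread = to;
 */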
159