; xref: /nrf52832-nimble/rt-thread/libcpu/arm/cortex-m7/context_rvds.S (revision 104654410c56c573564690304ae786df310c91fc)
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2009-01-17     Bernard      first version.
; * 2012-01-01     aozima       support context switch load/store FPU register.
; * 2013-06-18     aozima       add restore MSP feature.
; * 2013-06-23     aozima       support lazy stack optimized.
; * 2018-07-24     aozima       enhancement hard fault exception handler.
; */

;/**
; * @addtogroup cortex-m7
; */
;/*@{*/

SCB_VTOR        EQU     0xE000ED08               ; Vector Table Offset Register
NVIC_INT_CTRL   EQU     0xE000ED04               ; Interrupt Control State Register (ICSR)
NVIC_SYSPRI2    EQU     0xE000ED20               ; System Handler Priority Register holding PendSV priority
NVIC_PENDSV_PRI EQU     0x00FF0000               ; PendSV priority value (lowest)
NVIC_PENDSVSET  EQU     0x10000000               ; ICSR.PENDSVSET: value to trigger PendSV exception

    AREA |.text|, CODE, READONLY, ALIGN=2
    THUMB
    REQUIRE8
    PRESERVE8

    ; kernel variables shared with the C scheduler
    IMPORT rt_thread_switch_interrupt_flag
    IMPORT rt_interrupt_from_thread
    IMPORT rt_interrupt_to_thread

;/*
; * rt_base_t rt_hw_interrupt_disable(void);
; * Disable interrupts and return the previous PRIMASK value so the
; * caller can later restore it with rt_hw_interrupt_enable().
; */
rt_hw_interrupt_disable    PROC
    EXPORT  rt_hw_interrupt_disable
    MRS     r0, PRIMASK             ; r0 = current PRIMASK (return value)
    CPSID   I                       ; mask configurable-priority interrupts
    BX      LR
    ENDP

;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restore the PRIMASK value previously returned by rt_hw_interrupt_disable().
; */
rt_hw_interrupt_enable    PROC
    EXPORT  rt_hw_interrupt_enable
    MSR     PRIMASK, r0             ; restore saved interrupt mask
    BX      LR
    ENDP

;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; * r0 --> from (address of the from-thread's saved stack pointer)
; * r1 --> to   (address of the to-thread's saved stack pointer)
; * The real register save/restore is deferred to PendSV_Handler, which
; * this function pends; PendSV runs at the lowest exception priority.
; */
rt_hw_context_switch_interrupt
    EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch    PROC
    EXPORT rt_hw_context_switch

    ; set rt_thread_switch_interrupt_flag to 1; if a switch is already
    ; pending, keep the original "from" thread and only update "to".
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch
    MOV     r3, #1
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
    STR     r0, [r2]

_reswitch
    LDR     r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
    STR     r1, [r2]

    LDR     r0, =NVIC_INT_CTRL              ; trigger the PendSV exception (causes context switch)
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]
    BX      LR
    ENDP

; void PendSV_Handler(void);
; Performs the actual thread context switch pended by
; rt_hw_context_switch()/rt_hw_context_switch_to().
; xpsr, pc, lr, r12, r3, r2, r1, r0 were already pushed onto the [from]
; thread stack by exception-entry hardware; this handler saves/restores
; the remaining r4-r11 (and s16-s31 when an FP context is active).
PendSV_Handler   PROC
    EXPORT PendSV_Handler

    ; disable interrupt to protect context switch
    MRS     r2, PRIMASK
    CPSID   I

    ; get rt_thread_switch_interrupt_flag
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CBZ     r1, pendsv_exit         ; pendsv already handled

    ; clear rt_thread_switch_interrupt_flag to 0
    MOV     r1, #0x00
    STR     r1, [r0]

    LDR     r0, =rt_interrupt_from_thread
    LDR     r1, [r0]
    CBZ     r1, switch_to_thread    ; skip register save at the first switch

    MRS     r1, psp                 ; get from-thread stack pointer

    IF      {FPU} != "SoftVFP"
    TST     lr, #0x10               ; EXC_RETURN[4] == 0 --> FP context active
    VSTMFDEQ  r1!, {d8 - d15}       ; push FPU registers s16~s31
    ENDIF

    STMFD   r1!, {r4 - r11}         ; push r4 - r11 registers

    IF      {FPU} != "SoftVFP"
    MOV     r4, #0x00               ; flag = 0 (no FP context saved)

    TST     lr, #0x10               ; if(!EXC_RETURN[4])
    MOVEQ   r4, #0x01               ; flag = 1 (FP context saved)

    STMFD   r1!, {r4}               ; push flag
    ENDIF

    LDR     r0, [r0]
    STR     r1, [r0]                ; update from-thread stack pointer

switch_to_thread
    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]
    LDR     r1, [r1]                ; load to-thread stack pointer

    IF      {FPU} != "SoftVFP"
    LDMFD   r1!, {r3}               ; pop flag
    ENDIF

    LDMFD   r1!, {r4 - r11}         ; pop r4 - r11 registers

    IF      {FPU} != "SoftVFP"
    CMP     r3,  #0                 ; if(flag_r3 != 0)
    VLDMFDNE  r1!, {d8 - d15}       ; pop FPU registers s16~s31
    ENDIF

    MSR     psp, r1                 ; update stack pointer

    IF      {FPU} != "SoftVFP"
    ORR     lr, lr, #0x10           ; default: EXC_RETURN[4]=1, return without FP frame
    CMP     r3,  #0                 ; if(flag_r3 != 0)
    BICNE   lr, lr, #0x10           ; EXC_RETURN[4]=0, hardware restores FP frame
    ENDIF

pendsv_exit
    ; restore interrupt
    MSR     PRIMASK, r2

    ORR     lr, lr, #0x04           ; EXC_RETURN[2]=1: return to thread mode using PSP
    BX      lr
    ENDP

;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to (address of the to-thread's saved stack pointer)
; * This function is used to perform the first thread switch at scheduler
; * start-up: it pends PendSV, resets MSP to the initial value taken from
; * the vector table, then enables interrupts so PendSV can run.
; * It never returns.
; */
rt_hw_context_switch_to    PROC
    EXPORT rt_hw_context_switch_to
    ; set to thread
    LDR     r1, =rt_interrupt_to_thread
    STR     r0, [r1]

    IF      {FPU} != "SoftVFP"
    ; clear CONTROL.FPCA: no FP context is active before the first thread
    MRS     r2, CONTROL             ; read
    BIC     r2, #0x04               ; modify
    MSR     CONTROL, r2             ; write-back
    ENDIF

    ; set from thread to 0 so PendSV skips the register-save step
    LDR     r1, =rt_interrupt_from_thread
    MOV     r0, #0x0
    STR     r0, [r1]

    ; set interrupt flag to 1
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOV     r0, #1
    STR     r0, [r1]

    ; set the PendSV exception priority (lowest)
    LDR     r0, =NVIC_SYSPRI2
    LDR     r1, =NVIC_PENDSV_PRI
    LDR.W   r2, [r0,#0x00]       ; read
    ORR     r1,r1,r2             ; modify
    STR     r1, [r0]             ; write-back

    ; trigger the PendSV exception (causes context switch)
    LDR     r0, =NVIC_INT_CTRL
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]

    ; restore MSP from the initial SP entry of the vector table
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]
    LDR     r0, [r0]
    MSR     msp, r0

    ; enable interrupts at processor level; pending PendSV fires here
    CPSIE   F
    CPSIE   I

    ; never reach here!
    ENDP

; compatible with old version: context switching is done entirely in
; PendSV, so nothing is needed at interrupt exit any more.
rt_hw_interrupt_thread_switch PROC
    EXPORT rt_hw_interrupt_thread_switch
    BX      lr
    ENDP

220*10465441SEvalZero    IMPORT rt_hw_hard_fault_exception
221*10465441SEvalZero    EXPORT HardFault_Handler
222*10465441SEvalZero    EXPORT MemManage_Handler
223*10465441SEvalZeroHardFault_Handler    PROC
224*10465441SEvalZeroMemManage_Handler
225*10465441SEvalZero
226*10465441SEvalZero    ; get current context
227*10465441SEvalZero    TST     lr, #0x04               ; if(!EXC_RETURN[2])
228*10465441SEvalZero    ITE     EQ
229*10465441SEvalZero    MRSEQ   r0, msp                 ; [2]=0 ==> Z=1, get fault context from handler.
230*10465441SEvalZero    MRSNE   r0, psp                 ; [2]=1 ==> Z=0, get fault context from thread.
231*10465441SEvalZero
232*10465441SEvalZero    STMFD   r0!, {r4 - r11}         ; push r4 - r11 register
233*10465441SEvalZero    IF      {FPU} != "SoftVFP"
234*10465441SEvalZero    STMFD   r0!, {lr}               ; push dummy for flag
235*10465441SEvalZero    ENDIF
236*10465441SEvalZero    STMFD   r0!, {lr}               ; push exec_return register
237*10465441SEvalZero
238*10465441SEvalZero    TST     lr, #0x04               ; if(!EXC_RETURN[2])
239*10465441SEvalZero    ITE     EQ
240*10465441SEvalZero    MSREQ   msp, r0                 ; [2]=0 ==> Z=1, update stack pointer to MSP.
241*10465441SEvalZero    MSRNE   psp, r0                 ; [2]=1 ==> Z=0, update stack pointer to PSP.
242*10465441SEvalZero
243*10465441SEvalZero    PUSH    {lr}
244*10465441SEvalZero    BL      rt_hw_hard_fault_exception
245*10465441SEvalZero    POP     {lr}
246*10465441SEvalZero
247*10465441SEvalZero    ORR     lr, lr, #0x04
248*10465441SEvalZero    BX      lr
249*10465441SEvalZero    ENDP
250*10465441SEvalZero
251*10465441SEvalZero    ALIGN   4
252*10465441SEvalZero
253*10465441SEvalZero    END
254