/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2009-10-11     Bernard      first version
 * 2012-01-01     aozima       support saving/restoring FPU registers on context switch.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-06-23     aozima       support lazy stack optimization.
 * 2018-07-24     aozima       enhance the hard fault exception handler.
 */

/**
 * @addtogroup cortex-m4
 */
/*@{*/

.cpu cortex-m4
.syntax unified
.thumb
.text

.equ    SCB_VTOR,           0xE000ED08              /* Vector Table Offset Register */
.equ    NVIC_INT_CTRL,      0xE000ED04              /* interrupt control state register */
.equ    NVIC_SYSPRI2,       0xE000ED20              /* system priority register (2) */
.equ    NVIC_PENDSV_PRI,    0x00FF0000              /* PendSV priority value (lowest) */
.equ    NVIC_PENDSVSET,     0x10000000              /* value to trigger PendSV exception */

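/*
 * Overview: on Cortex-M the actual register save/restore is done in the
 * PendSV handler. rt_hw_context_switch()/rt_hw_context_switch_interrupt()
 * only record the "from" and "to" thread stack-pointer slots and pend the
 * PendSV exception; PendSV is configured with the lowest priority (see
 * rt_hw_context_switch_to below), so the switch runs only after all other
 * exceptions have completed.
 */
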
/*
 * rt_base_t rt_hw_interrupt_disable();
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
    MRS     r0, PRIMASK         /* return the current PRIMASK as the interrupt level */
    CPSID   I                   /* mask all configurable-priority interrupts */
    BX      LR

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
    MSR     PRIMASK, r0         /* restore the PRIMASK value saved by rt_hw_interrupt_disable() */
    BX      LR

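/*
 * Typical usage from C (illustrative sketch, assuming the usual RT-Thread
 * declarations from rthw.h):
 *
 *     rt_base_t level;
 *
 *     level = rt_hw_interrupt_disable();  // save PRIMASK and mask interrupts
 *     ... critical section ...
 *     rt_hw_interrupt_enable(level);      // restore the saved PRIMASK
 */
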
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
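/*
 * Note: both entry points below share one implementation because the real
 * switch is deferred to PendSV. As used by PendSV_Handler, "from" and "to"
 * are the addresses of the threads' saved stack-pointer slots; they are
 * dereferenced with an extra LDR/STR before the stack pointer is read or
 * written.
 */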
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function

rt_hw_context_switch_interrupt:
rt_hw_context_switch:
    /* set rt_thread_switch_interrupt_flag to 1 */
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch
    MOV     r3, #1
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
    STR     r0, [r2]

_reswitch:
    LDR     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
    STR     r1, [r2]

    LDR     r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]
    BX      LR

/* r0 --> switch from thread stack
 * r1 --> switch to thread stack
 * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
 */
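/*
 * Thread stack layout as saved/restored by this handler, from the final
 * saved stack pointer (low address) upwards, as implied by the push/pop
 * sequences below (bracketed entries exist only in FPU builds):
 *
 *   [flag]                  1 if the FPU registers were stacked, else 0
 *   r4 - r11                pushed by software (STMFD below)
 *   [d8 - d15 / s16 - s31]  pushed by software when the thread owned the FPU
 *   r0 - r3, r12, lr, pc, xPSR    pushed by hardware on exception entry
 *   [s0 - s15, FPSCR]             stacked by hardware when CONTROL.FPCA was set
 */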
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
    /* disable interrupt to protect context switch */
    MRS     r2, PRIMASK
    CPSID   I

    /* get rt_thread_switch_interrupt_flag */
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CBZ     r1, pendsv_exit     /* pendsv already handled */

    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV     r1, #0x00
    STR     r1, [r0]

    LDR     r0, =rt_interrupt_from_thread
    LDR     r1, [r0]
    CBZ     r1, switch_to_thread    /* no "from" thread on the first switch, skip register save */

    MRS     r1, psp             /* get from thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10           /* if(!EXC_RETURN[4]): FPU context is active */
    VSTMDBEQ r1!, {d8 - d15}    /* push FPU registers s16~s31 */
#endif

    STMFD   r1!, {r4 - r11}     /* push r4 - r11 registers */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    MOV     r4, #0x00           /* flag = 0 */

    TST     lr, #0x10           /* if(!EXC_RETURN[4]) */
    MOVEQ   r4, #0x01           /* flag = 1 */

    STMFD   r1!, {r4}           /* push flag */
#endif

    LDR     r0, [r0]            /* r0 = address of the "from" thread's sp slot */
    STR     r1, [r0]            /* update from thread stack pointer */

switch_to_thread:
    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]            /* r1 = address of the "to" thread's sp slot */
    LDR     r1, [r1]            /* load thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    LDMFD   r1!, {r3}           /* pop flag */
#endif

    LDMFD   r1!, {r4 - r11}     /* pop r4 - r11 registers */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    CMP     r3,  #0             /* if(flag_r3 != 0) */
    VLDMIANE r1!, {d8 - d15}    /* pop FPU registers s16~s31 */
#endif

    MSR     psp, r1             /* update stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ORR     lr, lr, #0x10       /* lr |=  (1 << 4): no FP state to restore (CONTROL.FPCA will be cleared) */
    CMP     r3,  #0             /* if(flag_r3 != 0) */
    BICNE   lr, lr, #0x10       /* lr &= ~(1 << 4): FP state saved on the stack, restore it */
#endif

pendsv_exit:
    /* restore interrupt */
    MSR     PRIMASK, r2

    ORR     lr, lr, #0x04       /* ensure EXC_RETURN[2] is set: exception return uses the process stack (PSP) */
    BX      lr

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 */
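/*
 * Typically called once by the scheduler (rt_system_scheduler_start() in the
 * RT-Thread kernel) to start the first thread; it never returns.
 */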
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
    LDR     r1, =rt_interrupt_to_thread
    STR     r0, [r1]

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /* clear CONTROL.FPCA */
    MRS     r2, CONTROL         /* read */
    BIC     r2, #0x04           /* modify */
    MSR     CONTROL, r2         /* write-back */
#endif

    /* set from thread to 0 */
    LDR     r1, =rt_interrupt_from_thread
    MOV     r0, #0x0
    STR     r0, [r1]

    /* set interrupt flag to 1 */
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOV     r0, #1
    STR     r0, [r1]

    /* set the PendSV exception priority to the lowest level */
    LDR     r0, =NVIC_SYSPRI2
    LDR     r1, =NVIC_PENDSV_PRI
    LDR.W   r2, [r0, #0x00]     /* read       */
    ORR     r1, r1, r2          /* modify     */
    STR     r1, [r0]            /* write-back */

    LDR     r0, =NVIC_INT_CTRL  /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]

    /* restore MSP to its initial value, taken from the first entry of the vector table */
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]
    LDR     r0, [r0]
    NOP
    MSR     msp, r0

    /* enable interrupts at processor level */
    CPSIE   F
    CPSIE   I

    /* the pended PendSV exception takes over and starts the first thread; never reach here! */

/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
    BX      lr
    NOP

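/*
 * Hard fault handler: capture the faulting context from MSP or PSP
 * (selected by EXC_RETURN[2]), extend it with r4 - r11 and EXC_RETURN,
 * then hand it over to the C handler rt_hw_hard_fault_exception() for
 * fault reporting.
 */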
.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
    /* get current context */
    MRS     r0, msp                 /* get fault context from handler. */
    TST     lr, #0x04               /* if(!EXC_RETURN[2]) the fault occurred on MSP */
    BEQ     _get_sp_done
    MRS     r0, psp                 /* get fault context from thread. */
_get_sp_done:

    STMFD   r0!, {r4 - r11}         /* push r4 - r11 registers */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    STMFD   r0!, {lr}               /* push dummy for flag */
#endif
    STMFD   r0!, {lr}               /* push EXC_RETURN (exec_return) register */

    TST     lr, #0x04               /* if(!EXC_RETURN[2]) */
    BEQ     _update_msp
    MSR     psp, r0                 /* update stack pointer to PSP. */
    B       _update_done
_update_msp:
    MSR     msp, r0                 /* update stack pointer to MSP. */
_update_done:

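    /* r0 still points at the saved context; per the AAPCS it is passed
     * as the first argument to rt_hw_hard_fault_exception() below. */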
    PUSH    {LR}
    BL      rt_hw_hard_fault_exception
    POP     {LR}

    ORR     lr, lr, #0x04           /* exception return uses the process stack (PSP) */
    BX      lr