/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2009-10-11     Bernard      first version
 * 2012-01-01     aozima       support saving/restoring FPU registers on context switch.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-06-23     aozima       support lazy stacking optimization.
 * 2018-07-24     aozima       enhance the hard fault exception handler.
 */

/**
 * @addtogroup cortex-m4
 */
/*@{*/

.cpu cortex-m4
.syntax unified
.thumb
.text

.equ    SCB_VTOR,           0xE000ED08              /* Vector Table Offset Register */
.equ    NVIC_INT_CTRL,      0xE000ED04              /* Interrupt Control and State Register */
.equ    NVIC_SYSPRI2,       0xE000ED20              /* system handler priority register (PendSV/SysTick) */
.equ    NVIC_PENDSV_PRI,    0x00FF0000              /* PendSV priority value (lowest) */
.equ    NVIC_PENDSVSET,     0x10000000              /* value to trigger PendSV exception */

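/*
 * On Cortex-M the context switch itself happens in PendSV_Handler. The
 * switch functions below only record the from/to thread stack pointer
 * addresses and pend the PendSV exception, which runs at the lowest
 * priority once no other exception is active.
 */
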
/*
 * rt_base_t rt_hw_interrupt_disable();
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
    MRS     r0, PRIMASK             /* return the current PRIMASK as the interrupt level */
    CPSID   I                       /* disable interrupts */
    BX      LR

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
    MSR     PRIMASK, r0             /* restore the saved PRIMASK (interrupt level) */
    BX      LR
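
/*
 * Typical use of the pair above from C (a sketch, not code from this file):
 *     rt_base_t level = rt_hw_interrupt_disable();
 *     ... critical section ...
 *     rt_hw_interrupt_enable(level);
 * Returning and restoring PRIMASK keeps critical sections nestable.
 */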

/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function

rt_hw_context_switch_interrupt:
rt_hw_context_switch:
    /* set rt_thread_switch_interrupt_flag to 1 */
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1                          /* a switch is already pending */
    BEQ     _reswitch                       /* then only update the target */
    MOV     r3, #1
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
    STR     r0, [r2]

_reswitch:
    LDR     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
    STR     r1, [r2]

    LDR r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR r1, =NVIC_PENDSVSET
    STR r1, [r0]
    BX  LR
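
/*
 * When the flag was already set, a PendSV is still pending: only the target
 * thread above is updated, so back-to-back switch requests collapse into a
 * single switch away from the original from-thread.
 */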

/* r0 --> switch from thread stack
 * r1 --> switch to thread stack
 * xpsr, pc, lr, r12, r3, r2, r1, r0 are pushed onto the [from] stack by hardware
 */
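/*
 * Frame built on the from-thread's stack, from high to low addresses:
 *     hardware-stacked: xpsr, pc, lr, r12, r3, r2, r1, r0
 *     saved below:      s16~s31 (d8~d15)  -- only when EXC_RETURN[4] == 0
 *                       r4 - r11
 *                       flag              -- FPU builds only: 1 if FP registers were saved
 * The resulting stack pointer is stored to the from-thread through
 * rt_interrupt_from_thread, and switch_to_thread below unwinds the same
 * layout from the to-thread's stack.
 */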
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
    /* disable interrupts to protect the context switch */
    MRS r2, PRIMASK
    CPSID   I

    /* get rt_thread_switch_interrupt_flag */
    LDR r0, =rt_thread_switch_interrupt_flag
    LDR r1, [r0]
    CBZ r1, pendsv_exit         /* PendSV already handled, nothing to do */

    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV r1, #0x00
    STR r1, [r0]

    LDR r0, =rt_interrupt_from_thread
    LDR r1, [r0]
    CBZ r1, switch_to_thread    /* skip the register save on the first switch */

    MRS r1, psp                 /* get the from-thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10           /* if(!EXC_RETURN[4]): an FP context is active */
    VSTMDBEQ r1!, {d8 - d15}    /* push FPU registers s16~s31 */
#endif

    STMFD   r1!, {r4 - r11}     /* push registers r4 - r11 */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    MOV     r4, #0x00           /* flag = 0 */

    TST     lr, #0x10           /* if(!EXC_RETURN[4]) */
    MOVEQ   r4, #0x01           /* flag = 1 */

    STMFD   r1!, {r4}           /* push the flag */
#endif

    LDR r0, [r0]                /* r0 = rt_interrupt_from_thread = &from_thread->sp */
    STR r1, [r0]                /* update the from-thread stack pointer */

switch_to_thread:
    LDR r1, =rt_interrupt_to_thread
    LDR r1, [r1]                /* r1 = rt_interrupt_to_thread = &to_thread->sp */
    LDR r1, [r1]                /* load the to-thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    LDMFD   r1!, {r3}           /* pop the flag */
#endif

    LDMFD   r1!, {r4 - r11}     /* pop registers r4 - r11 */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    CMP     r3,  #0             /* if(flag_r3 != 0) */
    VLDMIANE  r1!, {d8 - d15}   /* pop FPU registers s16~s31 */
#endif

    MSR psp, r1                 /* update the process stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ORR     lr, lr, #0x10       /* lr |=  (1 << 4): basic frame, FPCA cleared on return */
    CMP     r3,  #0             /* if(flag_r3 != 0) */
    BICNE   lr, lr, #0x10       /* lr &= ~(1 << 4): extended frame, FPCA set on return */
#endif
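
/*
 * EXC_RETURN[4] selects the frame the processor unstacks on return: 0 means
 * an extended frame including FP state, 1 means a basic frame. The block
 * above patches lr so the return matches the kind of frame the to-thread was
 * saved with, as recorded by the flag word.
 */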

pendsv_exit:
    /* restore the interrupt state */
    MSR PRIMASK, r2

    ORR lr, lr, #0x04           /* EXC_RETURN[2] = 1: return using the process stack (PSP) */
    BX  lr

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 */
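/*
 * Used to start the very first thread: no from-thread is recorded, so
 * PendSV_Handler skips the save step and only restores the to-thread context.
 */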
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
    LDR r1, =rt_interrupt_to_thread
    STR r0, [r1]

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /* clear CONTROL.FPCA so no FP context is active when the first thread starts */
    MRS     r2, CONTROL         /* read */
    BIC     r2, #0x04           /* modify */
    MSR     CONTROL, r2         /* write-back */
#endif

    /* set the from-thread to 0, so PendSV skips the register save */
    LDR r1, =rt_interrupt_from_thread
    MOV r0, #0x0
    STR r0, [r1]

    /* set the switch interrupt flag to 1 */
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOV     r0, #1
    STR     r0, [r1]

    /* set the PendSV exception to the lowest priority, so a context
     * switch only runs after all other exceptions complete */
    LDR r0, =NVIC_SYSPRI2
    LDR r1, =NVIC_PENDSV_PRI
    LDR.W   r2, [r0,#0x00]       /* read       */
    ORR     r1, r1, r2           /* modify     */
    STR     r1, [r0]             /* write-back */

    LDR r0, =NVIC_INT_CTRL      /* trigger the PendSV exception (causes context switch) */
    LDR r1, =NVIC_PENDSVSET
    STR r1, [r0]

    /* restore MSP to its initial value */
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]            /* r0 = vector table base address */
    LDR     r0, [r0]            /* r0 = vector table entry 0 = initial MSP */
    NOP
    MSR     msp, r0

    /* enable interrupts at processor level */
    CPSIE   F
    CPSIE   I

    /* never reached: the pending PendSV fires and starts the first thread */

/* kept for compatibility with older versions */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
    BX  lr
    NOP

.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
    /* get the stack pointer holding the faulting context */
    MRS     r0, msp                 /* assume the fault context is on the main stack. */
    TST     lr, #0x04               /* EXC_RETURN[2] == 0: the fault used MSP */
    BEQ     _get_sp_done
    MRS     r0, psp                 /* otherwise get the fault context from the thread stack. */
_get_sp_done:

    STMFD   r0!, {r4 - r11}         /* push registers r4 - r11 */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    STMFD   r0!, {lr}               /* push a dummy word in place of the FPU flag */
#endif
    STMFD   r0!, {lr}               /* push the exc_return value */

    TST     lr, #0x04               /* EXC_RETURN[2] == 0: the fault used MSP */
    BEQ     _update_msp
    MSR     psp, r0                 /* update the stack pointer to PSP. */
    B       _update_done
_update_msp:
    MSR     msp, r0                 /* update the stack pointer to MSP. */
_update_done:
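
    /* r0 now points to the saved frame (exc_return, [flag,] r4 - r11, then the
     * hardware-stacked registers); per the AAPCS it is also passed as the first
     * argument to the C handler called below. */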

    PUSH    {LR}
    BL      rt_hw_hard_fault_exception
    POP     {LR}

    ORR     lr, lr, #0x04
    BX      lr