xref: /nrf52832-nimble/rt-thread/src/scheduler.c (revision 104654410c56c573564690304ae786df310c91fc)
/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-17     Bernard      the first version
 * 2006-04-28     Bernard      fix the scheduler algorithm
 * 2006-04-30     Bernard      add SCHEDULER_DEBUG
 * 2006-05-27     Bernard      fix the scheduler algorithm for same-priority
 *                             thread scheduling
 * 2006-06-04     Bernard      rewrite the scheduler algorithm
 * 2006-08-03     Bernard      add hook support
 * 2006-09-05     Bernard      add 32 priority level support
 * 2006-09-24     Bernard      add rt_system_scheduler_start function
 * 2009-09-16     Bernard      fix _rt_scheduler_stack_check
 * 2010-04-11     yi.qiu       add module feature
 * 2010-07-13     Bernard      fix the maximal number of rt_scheduler_lock_nest
 *                             issue found by kuronca
 * 2010-12-13     Bernard      add defunct list initialization even if heap is not used
 * 2011-05-10     Bernard      clean scheduler debug log
 * 2013-12-21     Grissiom     add rt_critical_level
 * 2018-11-22     Jesven       remove the current task from the ready queue
 *                             add per-cpu ready queue
 *                             add _get_highest_priority_thread to find the highest priority task
 *                             rt_schedule_insert_thread won't insert the current task into the ready queue
 *                             in the SMP version, rt_hw_context_switch_interrupt may switch to
 *                               the new task directly
 *
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_SMP
rt_hw_spinlock_t _rt_critical_lock;
#endif /*RT_USING_SMP*/

rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
rt_uint32_t rt_thread_ready_priority_group;
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint8_t rt_thread_ready_table[32];
#endif

#ifndef RT_USING_SMP
extern volatile rt_uint8_t rt_interrupt_nest;
static rt_int16_t rt_scheduler_lock_nest;
struct rt_thread *rt_current_thread;
rt_uint8_t rt_current_priority;
#endif /*RT_USING_SMP*/

rt_list_t rt_thread_defunct;

#ifdef RT_USING_HOOK
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a
 * thread switch happens.
 *
 * @param hook the hook function
 */
void
rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
    rt_scheduler_hook = hook;
}
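
/*
 * Illustrative usage sketch (not part of the kernel): a trace hook that
 * prints every context switch. The hook runs with the scheduler locked, so
 * it should stay short.
 */
#if 0
static void trace_switch_hook(struct rt_thread *from, struct rt_thread *to)
{
    rt_kprintf("switch: %s -> %s\n", from->name, to->name);
}

static void install_trace_hook(void)
{
    rt_scheduler_sethook(trace_switch_hook);
}
#endif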

/**@}*/
#endif

#ifdef RT_USING_OVERFLOW_CHECK
static void _rt_scheduler_stack_check(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
    if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif
        (rt_ubase_t)thread->sp <= (rt_ubase_t)thread->stack_addr ||
        (rt_ubase_t)thread->sp >
        (rt_ubase_t)thread->stack_addr + (rt_ubase_t)thread->stack_size)
    {
        rt_ubase_t level;

        rt_kprintf("thread:%s stack overflow\n", thread->name);
#ifdef RT_USING_FINSH
        {
            extern long list_thread(void);
            list_thread();
        }
#endif
        level = rt_hw_interrupt_disable();
        while (level);
    }
#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
    {
        rt_kprintf("warning: %s stack is close to the top of stack address.\n",
                   thread->name);
    }
#else
    else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
    {
        rt_kprintf("warning: %s stack is close to end of stack address.\n",
                   thread->name);
    }
#endif
}
#endif
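
/*
 * Illustrative sketch (not part of the kernel): rt_thread_init() fills the
 * whole stack with the sentinel byte '#', so the check above only has to
 * look at the byte farthest from the initial stack pointer. A minimal
 * stand-alone model of that idea, assuming a downward-growing stack:
 */
#if 0
static void paint_stack(rt_uint8_t *stack_addr, rt_uint32_t stack_size)
{
    /* every byte starts as the sentinel; deep call chains overwrite it */
    rt_memset(stack_addr, '#', stack_size);
}

static int stack_bottom_clobbered(rt_uint8_t *stack_addr)
{
    /* for a downward-growing stack the lowest address is reached last */
    return *stack_addr != '#';
}
#endif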

/*
 * get the highest priority thread in the ready queue
 */
#ifdef RT_USING_SMP
static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority, local_highest_ready_priority;
    struct rt_cpu* pcpu = rt_cpu_self();

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    if (rt_thread_ready_priority_group == 0 && pcpu->priority_group == 0)
    {
        *highest_prio = pcpu->current_thread->current_priority;
        /* only the local idle thread is ready */
        return pcpu->current_thread;
    }

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
    number = __rt_ffs(pcpu->priority_group) - 1;
    local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
    local_highest_ready_priority = __rt_ffs(pcpu->priority_group) - 1;
#endif

    /* get the highest ready priority thread */
    if (highest_ready_priority < local_highest_ready_priority)
    {
        *highest_prio = highest_ready_priority;
        highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                  struct rt_thread,
                                  tlist);
    }
    else
    {
        *highest_prio = local_highest_ready_priority;
        highest_priority_thread = rt_list_entry(pcpu->priority_table[local_highest_ready_priority].next,
                                  struct rt_thread,
                                  tlist);
    }

    return highest_priority_thread;
}
#else
static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority;

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif

    /* get the highest ready priority thread */
    highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                              struct rt_thread,
                              tlist);

    *highest_prio = highest_ready_priority;

    return highest_priority_thread;
}
#endif
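
/*
 * Illustrative sketch (not part of the kernel): with RT_THREAD_PRIORITY_MAX > 32
 * the ready map is a two-level bitmap. Bit n of rt_thread_ready_priority_group
 * marks that group n (priorities 8n..8n+7) has at least one ready thread, and
 * bit m of rt_thread_ready_table[n] marks priority 8n+m. __rt_ffs() returns the
 * 1-based index of the lowest set bit (0 when the word is zero), so two lookups
 * yield the numerically lowest -- i.e. highest -- ready priority.
 */
#if 0
static rt_ubase_t lookup_highest_ready_priority(void)
{
    rt_ubase_t group = __rt_ffs(rt_thread_ready_priority_group) - 1;

    /* e.g. group == 2 with table bit 5 set -> priority (2 << 3) + 5 == 21 */
    return (group << 3) + __rt_ffs(rt_thread_ready_table[group]) - 1;
}
#endif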

/**
 * @ingroup SystemInit
 * This function will initialize the system scheduler
 */
void rt_system_scheduler_init(void)
{
#ifdef RT_USING_SMP
    int cpu;
#endif /*RT_USING_SMP*/
    register rt_base_t offset;

#ifndef RT_USING_SMP
    rt_scheduler_lock_nest = 0;
#endif /*RT_USING_SMP*/

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n",
                                      RT_THREAD_PRIORITY_MAX));

    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]);
    }
#ifdef RT_USING_SMP
    for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        struct rt_cpu *pcpu = rt_cpu_index(cpu);
        for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
        {
            rt_list_init(&pcpu->priority_table[offset]);
        }

        pcpu->irq_switch_flag = 0;
        pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
        pcpu->current_thread = RT_NULL;
        pcpu->priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
        rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
#endif
    }
#endif /*RT_USING_SMP*/

    /* initialize ready priority group */
    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* initialize ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif

    /* initialize the defunct thread list */
    rt_list_init(&rt_thread_defunct);
}

/**
 * @ingroup SystemInit
 * This function will start up the scheduler. It will select the thread
 * with the highest priority level, then switch to it.
 */
void rt_system_scheduler_start(void)
{
    register struct rt_thread *to_thread;
    rt_ubase_t highest_ready_priority;

    to_thread = _get_highest_priority_thread(&highest_ready_priority);

#ifdef RT_USING_SMP
    to_thread->oncpu = rt_hw_cpu_id();
#else
    rt_current_thread = to_thread;
#endif /*RT_USING_SMP*/

    rt_schedule_remove_thread(to_thread);
    to_thread->stat = RT_THREAD_RUNNING;

    /* switch to the new thread */
#ifdef RT_USING_SMP
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
#else
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp);
#endif /*RT_USING_SMP*/

    /* never returns */
}
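
/*
 * Illustrative sketch (not part of this file): the usual boot order, roughly
 * as done by the kernel's startup code. The application init step below is a
 * placeholder name.
 */
#if 0
void example_startup(void)
{
    rt_system_scheduler_init();   /* ready queues, defunct list           */
    rt_thread_idle_init();        /* at least the idle thread must exist  */
    /* ... create and start application threads here ...                  */
    rt_system_scheduler_start();  /* picks the highest priority ready
                                     thread and never returns             */
}
#endif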

/**
 * @addtogroup Thread
 */

/**@{*/


#ifdef RT_USING_SMP
/**
 * This function will handle the IPI interrupt and perform a scheduling in the system.
 *
 * @param vector the number of the IPI interrupt used for system scheduling
 * @param param not used, pass RT_NULL
 *
 * NOTE: this function should be invoked or registered as an ISR in the BSP.
 */
void rt_scheduler_ipi_handler(int vector, void *param)
{
    rt_schedule();
}
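
/*
 * Illustrative sketch (not part of the kernel): how a BSP might hook this
 * handler up. The registration call is port specific; this assumes a port
 * that routes the scheduling IPI through rt_hw_interrupt_install() and uses
 * the RT_SCHEDULE_IPI vector defined by the kernel.
 */
#if 0
static void bsp_install_schedule_ipi(void)
{
    rt_hw_interrupt_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler,
                            RT_NULL, "sched_ipi");
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
}
#endif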

/**
 * This function will perform one scheduling. It will select one thread
 * with the highest priority level in the global ready queue or the local
 * ready queue, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;
    struct rt_cpu    *pcpu;
    int cpu_id;

    /* disable interrupt */
    level  = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    /* whether to do the switch in interrupt context */
    if (pcpu->irq_nest)
    {
        pcpu->irq_switch_flag = 1;
    }
    else if (current_thread->scheduler_lock_nest == 1) /* whether the scheduler is locked */
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else
                {
                    rt_schedule_insert_thread(current_thread);
                }
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as the current thread */
                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to the new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                        ("[%d]switch to priority#%d "
                         "thread:%.*s(sp:0x%08x), "
                         "from thread:%.*s(sp: 0x%08x)\n",
                         pcpu->irq_nest, highest_ready_priority,
                         RT_NAME_MAX, to_thread->name, to_thread->sp,
                         RT_NAME_MAX, current_thread->name, current_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif

                {
                    extern void rt_thread_handle_sig(rt_bool_t clean_state);

                    rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
                                         (rt_ubase_t)&to_thread->sp, to_thread);

                    /* enable interrupt */
                    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                    /* check signal status */
                    rt_thread_handle_sig(RT_TRUE);
#endif
                    goto __exit;
                }
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

__exit:
    return;
}
#else
/**
 * This function will perform one scheduling. It will select one thread
 * with the highest priority level, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *from_thread;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* check whether the scheduler is enabled */
    if (rt_scheduler_lock_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0)
        {
            /* need_insert_from_thread: need to insert from_thread into the ready queue */
            int need_insert_from_thread = 0;

            to_thread = _get_highest_priority_thread(&highest_ready_priority);

            if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (rt_current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = rt_current_thread;
                }
                else
                {
                    need_insert_from_thread = 1;
                }
            }

            if (to_thread != rt_current_thread)
            {
                /* if the destination thread is not the same as the current thread */
                rt_current_priority = (rt_uint8_t)highest_ready_priority;
                from_thread         = rt_current_thread;
                rt_current_thread   = to_thread;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));

                if (need_insert_from_thread)
                {
                    rt_schedule_insert_thread(from_thread);
                }

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to the new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                        ("[%d]switch to priority#%d "
                         "thread:%.*s(sp:0x%08x), "
                         "from thread:%.*s(sp: 0x%08x)\n",
                         rt_interrupt_nest, highest_ready_priority,
                         RT_NAME_MAX, to_thread->name, to_thread->sp,
                         RT_NAME_MAX, from_thread->name, from_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif

                if (rt_interrupt_nest == 0)
                {
                    extern void rt_thread_handle_sig(rt_bool_t clean_state);

                    rt_hw_context_switch((rt_ubase_t)&from_thread->sp,
                            (rt_ubase_t)&to_thread->sp);

                    /* enable interrupt */
                    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                    /* check signal status */
                    rt_thread_handle_sig(RT_TRUE);
#endif
                    goto __exit;
                }
                else
                {
                    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                    rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp,
                            (rt_ubase_t)&to_thread->sp);
                }
            }
            else
            {
                rt_schedule_remove_thread(rt_current_thread);
                rt_current_thread->stat = RT_THREAD_RUNNING | (rt_current_thread->stat & ~RT_THREAD_STAT_MASK);
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

__exit:
    return;
}
#endif /*RT_USING_SMP*/
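
/*
 * Illustrative sketch (not part of the kernel): application code normally
 * does not call rt_schedule() directly; it is reached through the thread and
 * IPC APIs. rt_thread_yield(), for example, moves the running thread to the
 * tail of its priority list and then triggers a reschedule.
 */
#if 0
static void cooperative_worker(void *parameter)
{
    while (1)
    {
        /* do one slice of work ... */
        rt_thread_yield();      /* let an equal-priority peer run */
    }
}
#endif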

/**
 * This function checks if a scheduling is needed after the IRQ context. If yes,
 * it will select one thread with the highest priority level, and then switch
 * to it.
 */
#ifdef RT_USING_SMP
void rt_scheduler_do_irq_switch(void *context)
{
    int cpu_id;
    rt_base_t level;
    struct rt_cpu* pcpu;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;

    level = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    if (pcpu->irq_switch_flag == 0)
    {
        rt_hw_interrupt_enable(level);
        return;
    }

    if (current_thread->scheduler_lock_nest == 1 && pcpu->irq_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        /* clear irq switch flag */
        pcpu->irq_switch_flag = 0;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else
                {
                    rt_schedule_insert_thread(current_thread);
                }
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as the current thread */

                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                current_thread->cpus_lock_nest--;
                current_thread->scheduler_lock_nest--;

                rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
                        (rt_ubase_t)&to_thread->sp, to_thread);
            }
        }
    }
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/

/*
 * This function will insert a thread into the system ready queue. The state of
 * the thread will be set to READY and it will be removed from the suspend queue.
 *
 * @param thread the thread to be inserted
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    int cpu_id;
    int bind_cpu;
    rt_uint32_t cpu_mask;
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* it should be a RUNNING thread */
    if (thread->oncpu != RT_CPU_DETACHED)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert into the ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);

    cpu_id   = rt_hw_cpu_id();
    bind_cpu = thread->bind_cpu;

    /* insert thread into the ready list */
    if (bind_cpu == RT_CPUS_NR)
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
        rt_thread_ready_priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                              &(thread->tlist));
        cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);

#if RT_THREAD_PRIORITY_MAX > 32
        pcpu->ready_table[thread->number] |= thread->high_mask;
#endif
        pcpu->priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
                              &(thread->tlist));

        if (cpu_id != bind_cpu)
        {
            cpu_mask = 1 << bind_cpu;
            rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
        }
    }

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    register rt_base_t temp;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    temp = rt_hw_interrupt_disable();

    /* it's the current thread, it should be a RUNNING thread */
    if (thread == rt_current_thread)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert into the ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
    /* insert thread into the ready list */
    rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                          &(thread->tlist));

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

    /* set priority mask */
#if RT_THREAD_PRIORITY_MAX > 32
    rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
    rt_thread_ready_priority_group |= thread->number_mask;

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(temp);
}
#endif /*RT_USING_SMP*/

/*
 * This function will remove a thread from the system ready queue.
 *
 * @param thread the thread to be removed
 *
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (thread->bind_cpu == RT_CPUS_NR)
    {
        if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            rt_thread_ready_table[thread->number] &= ~thread->high_mask;
            if (rt_thread_ready_table[thread->number] == 0)
            {
                rt_thread_ready_priority_group &= ~thread->number_mask;
            }
#else
            rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
        }
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);

        if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            pcpu->ready_table[thread->number] &= ~thread->high_mask;
            if (pcpu->ready_table[thread->number] == 0)
            {
                pcpu->priority_group &= ~thread->number_mask;
            }
#else
            pcpu->priority_group &= ~thread->number_mask;
#endif
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] &= ~thread->high_mask;
        if (rt_thread_ready_table[thread->number] == 0)
        {
            rt_thread_ready_priority_group &= ~thread->number_mask;
        }
#else
        rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/

/**
 * This function will lock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_enter_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    /*
     * the maximal number of nestings is RT_UINT16_MAX, which is large
     * enough, so it is not checked here
     */

    /* lock scheduler for all cpus */
    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
    {
        rt_hw_spin_lock(&_rt_critical_lock);
    }

    /* lock scheduler for local cpu */
    current_thread->scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}
#else
void rt_enter_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /*
     * the maximal number of nestings is RT_UINT16_MAX, which is large
     * enough, so it is not checked here
     */
    rt_scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/
RTM_EXPORT(rt_enter_critical);

/**
 * This function will unlock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_exit_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;

    current_thread->scheduler_lock_nest --;

    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
    {
        rt_hw_spin_unlock(&_rt_critical_lock);
    }

    if (current_thread->scheduler_lock_nest <= 0)
    {
        current_thread->scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_local_irq_enable(level);

        rt_schedule();
    }
    else
    {
        /* enable interrupt */
        rt_hw_local_irq_enable(level);
    }
}
#else
void rt_exit_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    rt_scheduler_lock_nest --;

    if (rt_scheduler_lock_nest <= 0)
    {
        rt_scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        rt_schedule();
    }
    else
    {
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
#endif /*RT_USING_SMP*/
RTM_EXPORT(rt_exit_critical);
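
/*
 * Illustrative usage sketch (not part of the kernel): rt_enter_critical() /
 * rt_exit_critical() disable preemption (not interrupts) and nest, so a
 * shared structure can be touched without being rescheduled away.
 */
#if 0
static rt_list_t example_list;

static void update_shared_list(rt_list_t *node)
{
    rt_enter_critical();            /* scheduler locked, nest level + 1   */
    rt_list_insert_after(&example_list, node);
    rt_exit_critical();             /* reschedules if the nest drops to 0 */
}
#endif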

/**
 * Get the scheduler lock level.
 *
 * @return the level of the scheduler lock. 0 means unlocked.
 */
rt_uint16_t rt_critical_level(void)
{
#ifdef RT_USING_SMP
    struct rt_thread *current_thread = rt_cpu_self()->current_thread;

    return current_thread->scheduler_lock_nest;
#else
    return rt_scheduler_lock_nest;
#endif /*RT_USING_SMP*/
}
RTM_EXPORT(rt_critical_level);

/**@}*/