/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-17     Bernard      the first version
 * 2006-04-28     Bernard      fix the scheduler algorithm
 * 2006-04-30     Bernard      add SCHEDULER_DEBUG
 * 2006-05-27     Bernard      fix the scheduler algorithm for same priority
 *                             thread schedule
 * 2006-06-04     Bernard      rewrite the scheduler algorithm
 * 2006-08-03     Bernard      add hook support
 * 2006-09-05     Bernard      add 32 priority level support
 * 2006-09-24     Bernard      add rt_system_scheduler_start function
 * 2009-09-16     Bernard      fix _rt_scheduler_stack_check
 * 2010-04-11     yi.qiu       add module feature
 * 2010-07-13     Bernard      fix the maximal number of rt_scheduler_lock_nest
 *                             issue found by kuronca
 * 2010-12-13     Bernard      add defunct list initialization even if not using heap.
 * 2011-05-10     Bernard      clean scheduler debug log.
 * 2013-12-21     Grissiom     add rt_critical_level
 * 2018-11-22     Jesven       remove the current task from ready queue
 *                             add per cpu ready queue
 *                             add _get_highest_priority_thread to find highest priority task
 *                             rt_schedule_insert_thread won't insert current task to ready queue
 *                             in smp version, rt_hw_context_switch_interrupt may switch to
 *                             new task directly
 *
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_SMP
rt_hw_spinlock_t _rt_critical_lock;
#endif /*RT_USING_SMP*/

rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
rt_uint32_t rt_thread_ready_priority_group;
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint8_t rt_thread_ready_table[32];
#endif

#ifndef RT_USING_SMP
extern volatile rt_uint8_t rt_interrupt_nest;
static rt_int16_t rt_scheduler_lock_nest;
struct rt_thread *rt_current_thread;
rt_uint8_t rt_current_priority;
#endif /*RT_USING_SMP*/

rt_list_t rt_thread_defunct;

#ifdef RT_USING_HOOK
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a thread
 * switch happens.
 *
 * @param hook the hook function
 */
void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
    rt_scheduler_hook = hook;
}
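
/*
 * A minimal usage sketch (assuming RT_USING_HOOK is enabled): register a hook
 * that logs every context switch. The hook runs right before the switch, so
 * it must be short and must not block. The names switch_monitor and
 * monitor_init below are illustrative only.
 *
 *     static void switch_monitor(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         rt_kprintf("switch: %s -> %s\n", from->name, to->name);
 *     }
 *
 *     static void monitor_init(void)
 *     {
 *         rt_scheduler_sethook(switch_monitor);
 *     }
 */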

/**@}*/
#endif

#ifdef RT_USING_OVERFLOW_CHECK
static void _rt_scheduler_stack_check(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
    if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif
        (rt_ubase_t)thread->sp <= (rt_ubase_t)thread->stack_addr ||
        (rt_ubase_t)thread->sp >
        (rt_ubase_t)thread->stack_addr + (rt_ubase_t)thread->stack_size)
    {
        rt_ubase_t level;

        rt_kprintf("thread:%s stack overflow\n", thread->name);
#ifdef RT_USING_FINSH
        {
            extern long list_thread(void);
            list_thread();
        }
#endif
        /* the stack is corrupted: disable interrupts and halt here forever */
        level = rt_hw_interrupt_disable();
        while (level);
    }
#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
    {
        rt_kprintf("warning: %s stack is close to the top of the stack address.\n",
                   thread->name);
    }
#else
    else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
    {
        rt_kprintf("warning: %s stack is close to the end of the stack address.\n",
                   thread->name);
    }
#endif
}
#endif

/*
 * get the highest priority thread in ready queue
 */
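/*
 * When RT_THREAD_PRIORITY_MAX > 32, the ready state is kept in a two-level
 * bitmap: bit n of rt_thread_ready_priority_group means "some priority in
 * [n*8, n*8+7] is ready", and rt_thread_ready_table[n] holds one bit per
 * priority inside that group. A worked example (values illustrative only):
 * if only priority 37 is ready, bit 4 of the group word and bit 5 of
 * rt_thread_ready_table[4] are set, and the decode below recovers it as
 *
 *     number = __rt_ffs(rt_thread_ready_priority_group) - 1;              // 4
 *     prio   = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
 *            // (4 << 3) + 6 - 1 = 37
 *
 * With 32 or fewer priorities only the single group word is needed.
 */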
#ifdef RT_USING_SMP
static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority, local_highest_ready_priority;
    struct rt_cpu* pcpu = rt_cpu_self();

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    if (rt_thread_ready_priority_group == 0 && pcpu->priority_group == 0)
    {
        *highest_prio = pcpu->current_thread->current_priority;
        /* only the local idle thread is ready */
        return pcpu->current_thread;
    }

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
    number = __rt_ffs(pcpu->priority_group) - 1;
    local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
    local_highest_ready_priority = __rt_ffs(pcpu->priority_group) - 1;
#endif

    /* get highest ready priority thread */
    if (highest_ready_priority < local_highest_ready_priority)
    {
        *highest_prio = highest_ready_priority;
        highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                                struct rt_thread,
                                                tlist);
    }
    else
    {
        *highest_prio = local_highest_ready_priority;
        highest_priority_thread = rt_list_entry(pcpu->priority_table[local_highest_ready_priority].next,
                                                struct rt_thread,
                                                tlist);
    }

    return highest_priority_thread;
}
#else
static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority;

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif

    /* get highest ready priority thread */
    highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                            struct rt_thread,
                                            tlist);

    *highest_prio = highest_ready_priority;

    return highest_priority_thread;
}
#endif

/**
 * @ingroup SystemInit
 * This function will initialize the system scheduler
 */
void rt_system_scheduler_init(void)
{
#ifdef RT_USING_SMP
    int cpu;
#endif /*RT_USING_SMP*/
    register rt_base_t offset;

#ifndef RT_USING_SMP
    rt_scheduler_lock_nest = 0;
#endif /*RT_USING_SMP*/

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n",
                                      RT_THREAD_PRIORITY_MAX));

    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]);
    }
#ifdef RT_USING_SMP
    for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        struct rt_cpu *pcpu = rt_cpu_index(cpu);
        for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
        {
            rt_list_init(&pcpu->priority_table[offset]);
        }

        pcpu->irq_switch_flag = 0;
        pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
        pcpu->current_thread = RT_NULL;
        pcpu->priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
        rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
#endif
    }
#endif /*RT_USING_SMP*/

    /* initialize ready priority group */
    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* initialize ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif

    /* initialize thread defunct */
    rt_list_init(&rt_thread_defunct);
}

/**
 * @ingroup SystemInit
 * This function will start up the scheduler. It will select the thread
 * with the highest priority level, then switch to it.
 */
void rt_system_scheduler_start(void)
{
    register struct rt_thread *to_thread;
    rt_ubase_t highest_ready_priority;

    to_thread = _get_highest_priority_thread(&highest_ready_priority);

#ifdef RT_USING_SMP
    to_thread->oncpu = rt_hw_cpu_id();
#else
    rt_current_thread = to_thread;
#endif /*RT_USING_SMP*/

    rt_schedule_remove_thread(to_thread);
    to_thread->stat = RT_THREAD_RUNNING;

    /* switch to new thread */
#ifdef RT_USING_SMP
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
#else
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp);
#endif /*RT_USING_SMP*/

    /* never come back */
}
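
/*
 * A minimal boot-order sketch for the two SystemInit functions above: the
 * scheduler must be initialized before any thread is created, and
 * rt_system_scheduler_start() never returns. The names rtthread_entry_example
 * and init_thread_entry are illustrative only; real ports typically drive
 * this sequence from rtthread_startup().
 *
 *     void rtthread_entry_example(void)
 *     {
 *         rt_system_scheduler_init();            // empty ready queues
 *
 *         rt_thread_t tid = rt_thread_create("init", init_thread_entry,
 *                                            RT_NULL, 2048, 8, 20);
 *         if (tid != RT_NULL)
 *             rt_thread_startup(tid);            // makes the thread READY
 *
 *         rt_system_scheduler_start();           // switch to it; no return
 *     }
 */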

/**
 * @addtogroup Thread
 */

/**@{*/

#ifdef RT_USING_SMP
/**
 * This function will handle the IPI interrupt and perform a scheduling in the system.
 *
 * @param vector the number of the IPI interrupt for system scheduling
 * @param param unused; use RT_NULL
 *
 * NOTE: this function should be invoked or registered as an ISR in the BSP.
 */
void rt_scheduler_ipi_handler(int vector, void *param)
{
    rt_schedule();
}
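
/*
 * A registration sketch (a BSP-side assumption, not kernel code): on SMP BSPs
 * that provide rt_hw_ipi_handler_install(), the scheduling IPI is typically
 * wired up during interrupt initialization, e.g.
 *
 *     rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
 *     rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
 *
 * The unmask call is only needed on ports where the IPI vector is masked by
 * default; check the specific BSP.
 */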

/**
 * This function will perform one round of scheduling. It will select the
 * thread with the highest priority level from the global or the local ready
 * queue, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;
    struct rt_cpu    *pcpu;
    int cpu_id;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    /* whether to defer the switch because we are inside an interrupt */
    if (pcpu->irq_nest)
    {
        pcpu->irq_switch_flag = 1;
    }
    else if (current_thread->scheduler_lock_nest == 1) /* whether the scheduler is locked by the current thread */
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else
                {
                    rt_schedule_insert_thread(current_thread);
                }
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as current thread */
                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                             ("[%d]switch to priority#%d "
                              "thread:%.*s(sp:0x%08x), "
                              "from thread:%.*s(sp: 0x%08x)\n",
                              pcpu->irq_nest, highest_ready_priority,
                              RT_NAME_MAX, to_thread->name, to_thread->sp,
                              RT_NAME_MAX, current_thread->name, current_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif

                {
                    extern void rt_thread_handle_sig(rt_bool_t clean_state);

                    rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
                                         (rt_ubase_t)&to_thread->sp, to_thread);

                    /* enable interrupt */
                    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                    /* check signal status */
                    rt_thread_handle_sig(RT_TRUE);
#endif
                    goto __exit;
                }
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

__exit:
    return ;
}
#else
/**
 * This function will perform one round of scheduling. It will select the
 * thread with the highest priority level, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *from_thread;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* check the scheduler is enabled or not */
    if (rt_scheduler_lock_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0)
        {
            /* need_insert_from_thread: need to insert from_thread to ready queue */
            int need_insert_from_thread = 0;

            to_thread = _get_highest_priority_thread(&highest_ready_priority);

            if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (rt_current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = rt_current_thread;
                }
                else
                {
                    need_insert_from_thread = 1;
                }
            }

            if (to_thread != rt_current_thread)
            {
                /* if the destination thread is not the same as current thread */
                rt_current_priority = (rt_uint8_t)highest_ready_priority;
                from_thread         = rt_current_thread;
                rt_current_thread   = to_thread;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));

                if (need_insert_from_thread)
                {
                    rt_schedule_insert_thread(from_thread);
                }

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                             ("[%d]switch to priority#%d "
                              "thread:%.*s(sp:0x%08x), "
                              "from thread:%.*s(sp: 0x%08x)\n",
                              rt_interrupt_nest, highest_ready_priority,
                              RT_NAME_MAX, to_thread->name, to_thread->sp,
                              RT_NAME_MAX, from_thread->name, from_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif

                if (rt_interrupt_nest == 0)
                {
                    extern void rt_thread_handle_sig(rt_bool_t clean_state);

                    rt_hw_context_switch((rt_ubase_t)&from_thread->sp,
                                         (rt_ubase_t)&to_thread->sp);

                    /* enable interrupt */
                    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                    /* check signal status */
                    rt_thread_handle_sig(RT_TRUE);
#endif
                    goto __exit;
                }
                else
                {
                    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                    rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp,
                                                   (rt_ubase_t)&to_thread->sp);
                }
            }
            else
            {
                rt_schedule_remove_thread(rt_current_thread);
                rt_current_thread->stat = RT_THREAD_RUNNING | (rt_current_thread->stat & ~RT_THREAD_STAT_MASK);
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

__exit:
    return;
}
#endif /*RT_USING_SMP*/

/**
 * This function checks whether a scheduling is needed after the IRQ context.
 * If yes, it will select the thread with the highest priority level, and then
 * switch to it.
 */
#ifdef RT_USING_SMP
void rt_scheduler_do_irq_switch(void *context)
{
    int cpu_id;
    rt_base_t level;
    struct rt_cpu* pcpu;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;

    level = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    if (pcpu->irq_switch_flag == 0)
    {
        rt_hw_interrupt_enable(level);
        return;
    }

    if (current_thread->scheduler_lock_nest == 1 && pcpu->irq_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        /* clear irq switch flag */
        pcpu->irq_switch_flag = 0;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else
                {
                    rt_schedule_insert_thread(current_thread);
                }
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as current thread */

                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                current_thread->cpus_lock_nest--;
                current_thread->scheduler_lock_nest--;

                rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
                                               (rt_ubase_t)&to_thread->sp, to_thread);
            }
        }
    }
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/

/*
 * This function will insert a thread into the system ready queue. The state
 * of the thread will be set to READY and it will be removed from the suspend
 * queue.
 *
 * @param thread the thread to be inserted
 *
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    int cpu_id;
    int bind_cpu;
    rt_uint32_t cpu_mask;
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* it should be RUNNING thread */
    if (thread->oncpu != RT_CPU_DETACHED)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert to ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);

    cpu_id   = rt_hw_cpu_id();
    bind_cpu = thread->bind_cpu;

    /* insert thread to ready list */
    if (bind_cpu == RT_CPUS_NR)
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
        rt_thread_ready_priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                              &(thread->tlist));
        cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);

#if RT_THREAD_PRIORITY_MAX > 32
        pcpu->ready_table[thread->number] |= thread->high_mask;
#endif
        pcpu->priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
                              &(thread->tlist));

        if (cpu_id != bind_cpu)
        {
            cpu_mask = 1 << bind_cpu;
            rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
        }
    }

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    register rt_base_t temp;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    temp = rt_hw_interrupt_disable();

    /* it's current thread, it should be RUNNING thread */
    if (thread == rt_current_thread)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert to ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
    /* insert thread to ready list */
    rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                          &(thread->tlist));

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

    /* set priority mask */
#if RT_THREAD_PRIORITY_MAX > 32
    rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
    rt_thread_ready_priority_group |= thread->number_mask;

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(temp);
}
#endif /*RT_USING_SMP*/

/*
 * This function will remove a thread from the system ready queue.
 *
 * @param thread the thread to be removed
 *
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (thread->bind_cpu == RT_CPUS_NR)
    {
        if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            rt_thread_ready_table[thread->number] &= ~thread->high_mask;
            if (rt_thread_ready_table[thread->number] == 0)
            {
                rt_thread_ready_priority_group &= ~thread->number_mask;
            }
#else
            rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
        }
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);

        if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            pcpu->ready_table[thread->number] &= ~thread->high_mask;
            /* check the per-cpu ready table (not the global one) before clearing the group bit */
            if (pcpu->ready_table[thread->number] == 0)
            {
                pcpu->priority_group &= ~thread->number_mask;
            }
#else
            pcpu->priority_group &= ~thread->number_mask;
#endif
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] &= ~thread->high_mask;
        if (rt_thread_ready_table[thread->number] == 0)
        {
            rt_thread_ready_priority_group &= ~thread->number_mask;
        }
#else
        rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/

/**
 * This function will lock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_enter_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    /*
     * the maximal number of nest is RT_UINT16_MAX, which is big
     * enough and is not checked here
     */

    /* lock scheduler for all cpus */
    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
    {
        rt_hw_spin_lock(&_rt_critical_lock);
    }

    /* lock scheduler for local cpu */
    current_thread->scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}
#else
void rt_enter_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /*
     * the maximal number of nest is RT_UINT16_MAX, which is big
     * enough and is not checked here
     */
    rt_scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/
RTM_EXPORT(rt_enter_critical);

/**
 * This function will unlock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_exit_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;

    current_thread->scheduler_lock_nest --;

    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
    {
        rt_hw_spin_unlock(&_rt_critical_lock);
    }

    if (current_thread->scheduler_lock_nest <= 0)
    {
        current_thread->scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_local_irq_enable(level);

        rt_schedule();
    }
    else
    {
        /* enable interrupt */
        rt_hw_local_irq_enable(level);
    }
}
#else
void rt_exit_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    rt_scheduler_lock_nest --;

    if (rt_scheduler_lock_nest <= 0)
    {
        rt_scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        rt_schedule();
    }
    else
    {
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
#endif /*RT_USING_SMP*/
RTM_EXPORT(rt_exit_critical);
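
/*
 * A minimal usage sketch for the pair above: rt_enter_critical() and
 * rt_exit_critical() nest, and while the count is non-zero the calling thread
 * will not be preempted by the scheduler (interrupts themselves stay enabled).
 * The shared_counter variable is illustrative only.
 *
 *     static rt_uint32_t shared_counter;
 *
 *     static void bump_counter(void)
 *     {
 *         rt_enter_critical();        // lock the scheduler
 *         shared_counter++;           // no thread switch can occur here
 *         rt_exit_critical();         // unlock; may reschedule now
 *     }
 */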

/**
 * Get the scheduler lock level.
 *
 * @return the level of the scheduler lock. 0 means unlocked.
 */
rt_uint16_t rt_critical_level(void)
{
#ifdef RT_USING_SMP
    struct rt_thread *current_thread = rt_cpu_self()->current_thread;

    return current_thread->scheduler_lock_nest;
#else
    return rt_scheduler_lock_nest;
#endif /*RT_USING_SMP*/
}
RTM_EXPORT(rt_critical_level);
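
/*
 * A debugging sketch (illustrative, not part of the kernel API contract):
 * because blocking while the scheduler is locked would stall the system,
 * code that may block can assert the lock level first.
 *
 *     RT_ASSERT(rt_critical_level() == 0);   // must not hold the scheduler lock
 *     rt_thread_mdelay(10);                  // safe to block here
 */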

/**@}*/