/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-23     Bernard      the first version
 * 2010-11-10     Bernard      add cleanup callback function in thread exit.
 * 2012-12-29     Bernard      fix compiling warning.
 * 2013-12-21     Grissiom     let rt_thread_idle_excute loop until there is no
 *                             dead thread.
 * 2016-08-09     ArdaFu       add method to get the handler of the idle thread.
 * 2018-02-07     Bernard      lock scheduler to protect tid->cleanup.
 * 2018-07-14     armink       add idle hook list
 * 2018-11-22     Jesven       add per cpu idle task
 *                             combine the code of primary and secondary cpu
 */
19*10465441SEvalZero
#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif

/* Generic hook support implies the idle hook list must also be available. */
#if defined (RT_USING_HOOK)
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif
#endif

/* Default idle stack size: larger when idle runs hooks or heap cleanup. */
#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE  256
#else
#define IDLE_THREAD_STACK_SIZE  128
#endif
#endif

/* One idle thread per CPU under SMP, a single one otherwise. */
#ifdef RT_USING_SMP
#define _CPUS_NR                RT_CPUS_NR
#else
#define _CPUS_NR                1
#endif

/* Kernel-owned list of threads that have exited and await cleanup. */
extern rt_list_t rt_thread_defunct;

/* Statically allocated idle thread control blocks and stacks, one per CPU. */
static struct rt_thread idle[_CPUS_NR];
ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_IDLE_HOOK
/* NOTE(review): "IDEL" is a historical typo kept for config compatibility. */
#ifndef RT_IDEL_HOOK_LIST_SIZE
#define RT_IDEL_HOOK_LIST_SIZE  4
#endif

/* Fixed-size table of hooks invoked on every pass of the idle loop. */
static void (*idle_hook_list[RT_IDEL_HOOK_LIST_SIZE])();
59*10465441SEvalZero
60*10465441SEvalZero /**
61*10465441SEvalZero * @ingroup Hook
62*10465441SEvalZero * This function sets a hook function to idle thread loop. When the system performs
63*10465441SEvalZero * idle loop, this hook function should be invoked.
64*10465441SEvalZero *
65*10465441SEvalZero * @param hook the specified hook function
66*10465441SEvalZero *
67*10465441SEvalZero * @return RT_EOK: set OK
68*10465441SEvalZero * -RT_EFULL: hook list is full
69*10465441SEvalZero *
70*10465441SEvalZero * @note the hook function must be simple and never be blocked or suspend.
71*10465441SEvalZero */
rt_thread_idle_sethook(void (* hook)(void))72*10465441SEvalZero rt_err_t rt_thread_idle_sethook(void (*hook)(void))
73*10465441SEvalZero {
74*10465441SEvalZero rt_size_t i;
75*10465441SEvalZero rt_base_t level;
76*10465441SEvalZero rt_err_t ret = -RT_EFULL;
77*10465441SEvalZero
78*10465441SEvalZero /* disable interrupt */
79*10465441SEvalZero level = rt_hw_interrupt_disable();
80*10465441SEvalZero
81*10465441SEvalZero for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
82*10465441SEvalZero {
83*10465441SEvalZero if (idle_hook_list[i] == RT_NULL)
84*10465441SEvalZero {
85*10465441SEvalZero idle_hook_list[i] = hook;
86*10465441SEvalZero ret = RT_EOK;
87*10465441SEvalZero break;
88*10465441SEvalZero }
89*10465441SEvalZero }
90*10465441SEvalZero /* enable interrupt */
91*10465441SEvalZero rt_hw_interrupt_enable(level);
92*10465441SEvalZero
93*10465441SEvalZero return ret;
94*10465441SEvalZero }
95*10465441SEvalZero
96*10465441SEvalZero /**
97*10465441SEvalZero * delete the idle hook on hook list
98*10465441SEvalZero *
99*10465441SEvalZero * @param hook the specified hook function
100*10465441SEvalZero *
101*10465441SEvalZero * @return RT_EOK: delete OK
102*10465441SEvalZero * -RT_ENOSYS: hook was not found
103*10465441SEvalZero */
rt_thread_idle_delhook(void (* hook)(void))104*10465441SEvalZero rt_err_t rt_thread_idle_delhook(void (*hook)(void))
105*10465441SEvalZero {
106*10465441SEvalZero rt_size_t i;
107*10465441SEvalZero rt_base_t level;
108*10465441SEvalZero rt_err_t ret = -RT_ENOSYS;
109*10465441SEvalZero
110*10465441SEvalZero /* disable interrupt */
111*10465441SEvalZero level = rt_hw_interrupt_disable();
112*10465441SEvalZero
113*10465441SEvalZero for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
114*10465441SEvalZero {
115*10465441SEvalZero if (idle_hook_list[i] == hook)
116*10465441SEvalZero {
117*10465441SEvalZero idle_hook_list[i] = RT_NULL;
118*10465441SEvalZero ret = RT_EOK;
119*10465441SEvalZero break;
120*10465441SEvalZero }
121*10465441SEvalZero }
122*10465441SEvalZero /* enable interrupt */
123*10465441SEvalZero rt_hw_interrupt_enable(level);
124*10465441SEvalZero
125*10465441SEvalZero return ret;
126*10465441SEvalZero }
127*10465441SEvalZero
128*10465441SEvalZero #endif
129*10465441SEvalZero
130*10465441SEvalZero /* Return whether there is defunctional thread to be deleted. */
_has_defunct_thread(void)131*10465441SEvalZero rt_inline int _has_defunct_thread(void)
132*10465441SEvalZero {
133*10465441SEvalZero /* The rt_list_isempty has prototype of "int rt_list_isempty(const rt_list_t *l)".
134*10465441SEvalZero * So the compiler has a good reason that the rt_thread_defunct list does
135*10465441SEvalZero * not change within rt_thread_idle_excute thus optimize the "while" loop
136*10465441SEvalZero * into a "if".
137*10465441SEvalZero *
138*10465441SEvalZero * So add the volatile qualifier here. */
139*10465441SEvalZero const volatile rt_list_t *l = (const volatile rt_list_t *)&rt_thread_defunct;
140*10465441SEvalZero
141*10465441SEvalZero return l->next != l;
142*10465441SEvalZero }
143*10465441SEvalZero
/**
 * @ingroup Thread
 *
 * This function will perform system background job when system idle.
 *
 * Drains the defunct-thread list: for each dead thread it runs the module
 * destructor (if any), the thread's cleanup callback, frees signal resources,
 * and — for dynamically created threads — releases the stack and the thread
 * object itself.  Must not be called from interrupt context.
 */
void rt_thread_idle_excute(void)
{
    /* Loop until there is no dead thread. So one call to rt_thread_idle_excute
     * will do all the cleanups. */
    while (_has_defunct_thread())
    {
        rt_base_t lock;
        rt_thread_t thread;
#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        RT_DEBUG_NOT_IN_INTERRUPT;

        /* disable interrupt */
        lock = rt_hw_interrupt_disable();

        /* re-check whether list is empty: another CPU (or a preempting
         * context before interrupts were masked) may have drained it */
        if (_has_defunct_thread())
        {
            /* get defunct thread */
            thread = rt_list_entry(rt_thread_defunct.next,
                                   struct rt_thread,
                                   tlist);
#ifdef RT_USING_MODULE
            /* destroy the dynamic module this thread belonged to, if any */
            module = (struct rt_dlmodule*)thread->module_id;
            if (module)
            {
                dlmodule_destroy(module);
            }
#endif
            /* remove defunct thread */
            rt_list_remove(&(thread->tlist));

            /* lock scheduler to prevent scheduling in cleanup function. */
            rt_enter_critical();

            /* invoke thread cleanup */
            if (thread->cleanup != RT_NULL)
                thread->cleanup(thread);

#ifdef RT_USING_SIGNALS
            rt_thread_free_sig(thread);
#endif

            /* if it's a system object, not delete it */
            if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE)
            {
                /* statically initialized thread: nothing to free, stop here */
                /* unlock scheduler */
                rt_exit_critical();

                /* enable interrupt */
                rt_hw_interrupt_enable(lock);

                return;
            }

            /* unlock scheduler */
            rt_exit_critical();
        }
        else
        {
            /* enable interrupt */
            rt_hw_interrupt_enable(lock);

            /* may the defunct thread list is removed by others, just return */
            return;
        }

        /* enable interrupt */
        rt_hw_interrupt_enable(lock);

#ifdef RT_USING_HEAP
        /* release thread's stack (heap-allocated for dynamic threads) */
        RT_KERNEL_FREE(thread->stack_addr);
        /* delete thread object */
        rt_object_delete((rt_object_t)thread);
#endif
    }
}
228*10465441SEvalZero
/* Entry point of every idle thread: run the hook table and defunct-thread
 * cleanup forever; secondary CPUs spin in their own architecture hook. */
static void rt_thread_idle_entry(void *parameter)
{
#ifdef RT_USING_SMP
    /* secondary CPUs never perform cleanup; they run the port-specific idle op */
    if (rt_hw_cpu_id() != 0)
    {
        for (;;)
        {
            rt_hw_secondary_cpu_idle_exec();
        }
    }
#endif

    for (;;)
    {
#ifdef RT_USING_IDLE_HOOK
        rt_size_t slot;

        /* call every registered idle hook in table order */
        for (slot = 0; slot < RT_IDEL_HOOK_LIST_SIZE; slot++)
        {
            if (idle_hook_list[slot] != RT_NULL)
            {
                idle_hook_list[slot]();
            }
        }
#endif

        /* reclaim resources of threads that have exited */
        rt_thread_idle_excute();
    }
}
258*10465441SEvalZero
259*10465441SEvalZero /**
260*10465441SEvalZero * @ingroup SystemInit
261*10465441SEvalZero *
262*10465441SEvalZero * This function will initialize idle thread, then start it.
263*10465441SEvalZero *
264*10465441SEvalZero * @note this function must be invoked when system init.
265*10465441SEvalZero */
rt_thread_idle_init(void)266*10465441SEvalZero void rt_thread_idle_init(void)
267*10465441SEvalZero {
268*10465441SEvalZero rt_ubase_t i;
269*10465441SEvalZero char tidle_name[RT_NAME_MAX];
270*10465441SEvalZero
271*10465441SEvalZero for (i = 0; i < _CPUS_NR; i++)
272*10465441SEvalZero {
273*10465441SEvalZero rt_sprintf(tidle_name, "tidle%d", i);
274*10465441SEvalZero rt_thread_init(&idle[i],
275*10465441SEvalZero tidle_name,
276*10465441SEvalZero rt_thread_idle_entry,
277*10465441SEvalZero RT_NULL,
278*10465441SEvalZero &rt_thread_stack[i][0],
279*10465441SEvalZero sizeof(rt_thread_stack[i]),
280*10465441SEvalZero RT_THREAD_PRIORITY_MAX - 1,
281*10465441SEvalZero 32);
282*10465441SEvalZero #ifdef RT_USING_SMP
283*10465441SEvalZero rt_thread_control(&idle[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
284*10465441SEvalZero #endif
285*10465441SEvalZero /* startup */
286*10465441SEvalZero rt_thread_startup(&idle[i]);
287*10465441SEvalZero }
288*10465441SEvalZero }
289*10465441SEvalZero
290*10465441SEvalZero /**
291*10465441SEvalZero * @ingroup Thread
292*10465441SEvalZero *
293*10465441SEvalZero * This function will get the handler of the idle thread.
294*10465441SEvalZero *
295*10465441SEvalZero */
rt_thread_idle_gethandler(void)296*10465441SEvalZero rt_thread_t rt_thread_idle_gethandler(void)
297*10465441SEvalZero {
298*10465441SEvalZero #ifdef RT_USING_SMP
299*10465441SEvalZero register int id = rt_hw_cpu_id();
300*10465441SEvalZero #else
301*10465441SEvalZero register int id = 0;
302*10465441SEvalZero #endif
303*10465441SEvalZero
304*10465441SEvalZero return (rt_thread_t)(&idle[id]);
305*10465441SEvalZero }
306