/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-23     Bernard      the first version
 * 2010-11-10     Bernard      add cleanup callback function in thread exit.
 * 2012-12-29     Bernard      fix compiling warning.
 * 2013-12-21     Grissiom     let rt_thread_idle_excute loop until there is no
 *                             dead thread.
 * 2016-08-09     ArdaFu       add method to get the handler of the idle thread.
 * 2018-02-07     Bernard      lock scheduler to protect tid->cleanup.
 * 2018-07-14     armink       add idle hook list
 * 2018-11-22     Jesven       add per cpu idle task
 *                             combine the code of primary and secondary cpu
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif

#if defined (RT_USING_HOOK)
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif
#endif

#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE  256
#else
#define IDLE_THREAD_STACK_SIZE  128
#endif
#endif
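
/*
 * Usage sketch: the stack size above is only a default; a project that runs
 * heavier idle hooks can override it before this file is compiled, typically
 * from its rtconfig.h. The value below is a hypothetical example, not a
 * recommendation:
 *
 *     #define IDLE_THREAD_STACK_SIZE  512
 */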

#ifdef RT_USING_SMP
#define _CPUS_NR                RT_CPUS_NR
#else
#define _CPUS_NR                1
#endif

extern rt_list_t rt_thread_defunct;

static struct rt_thread idle[_CPUS_NR];
ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDEL_HOOK_LIST_SIZE
#define RT_IDEL_HOOK_LIST_SIZE  4
#endif

static void (*idle_hook_list[RT_IDEL_HOOK_LIST_SIZE])();

/**
 * @ingroup Hook
 * This function sets a hook function for the idle thread loop. The hook is
 * invoked each time the system runs the idle loop.
 *
 * @param hook the specified hook function
 *
 * @return RT_EOK: set OK
 *         -RT_EFULL: hook list is full
 *
 * @note the hook function must be simple and must never block or suspend.
 */
rt_err_t rt_thread_idle_sethook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_EFULL;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == RT_NULL)
        {
            idle_hook_list[i] = hook;
            ret = RT_EOK;
            break;
        }
    }
    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}
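
/*
 * Usage sketch for rt_thread_idle_sethook(): register a short, non-blocking
 * hook from application init code. The hook name my_idle_hook below is
 * hypothetical.
 *
 *     static void my_idle_hook(void)
 *     {
 *         // keep it short: it runs in the idle thread and must not block
 *     }
 *
 *     if (rt_thread_idle_sethook(my_idle_hook) != RT_EOK)
 *     {
 *         // hook list is full (-RT_EFULL)
 *     }
 */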

/**
 * delete an idle hook from the hook list
 *
 * @param hook the specified hook function
 *
 * @return RT_EOK: delete OK
 *         -RT_ENOSYS: hook was not found
 */
rt_err_t rt_thread_idle_delhook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_ENOSYS;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == hook)
        {
            idle_hook_list[i] = RT_NULL;
            ret = RT_EOK;
            break;
        }
    }
    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}
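
/*
 * Usage sketch for rt_thread_idle_delhook(): unregister a hook with the same
 * function pointer that was passed to rt_thread_idle_sethook(); my_idle_hook
 * refers to the hypothetical hook from the sketch above.
 *
 *     if (rt_thread_idle_delhook(my_idle_hook) != RT_EOK)
 *     {
 *         // the hook was not registered (-RT_ENOSYS)
 *     }
 */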

#endif

/* Return whether there is a defunct thread to be deleted. */
rt_inline int _has_defunct_thread(void)
{
    /* The rt_list_isempty has the prototype "int rt_list_isempty(const rt_list_t *l)",
     * so the compiler may assume that the rt_thread_defunct list does not
     * change within rt_thread_idle_excute and optimize the "while" loop
     * into an "if".
     *
     * So add the volatile qualifier here. */
    const volatile rt_list_t *l = (const volatile rt_list_t *)&rt_thread_defunct;

    return l->next != l;
}

/**
 * @ingroup Thread
 *
 * This function will perform the system background job when the system is idle.
 */
void rt_thread_idle_excute(void)
{
    /* Loop until there is no dead thread. So one call to rt_thread_idle_excute
     * will do all the cleanups. */
    while (_has_defunct_thread())
    {
        rt_base_t lock;
        rt_thread_t thread;
#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        RT_DEBUG_NOT_IN_INTERRUPT;

        /* disable interrupt */
        lock = rt_hw_interrupt_disable();

        /* re-check whether the list is empty */
        if (_has_defunct_thread())
        {
            /* get the defunct thread */
            thread = rt_list_entry(rt_thread_defunct.next,
                                   struct rt_thread,
                                   tlist);
#ifdef RT_USING_MODULE
            module = (struct rt_dlmodule*)thread->module_id;
            if (module)
            {
                dlmodule_destroy(module);
            }
#endif
            /* remove the defunct thread */
            rt_list_remove(&(thread->tlist));

            /* lock the scheduler to prevent scheduling in the cleanup function. */
            rt_enter_critical();

            /* invoke the thread cleanup */
            if (thread->cleanup != RT_NULL)
                thread->cleanup(thread);

#ifdef RT_USING_SIGNALS
            rt_thread_free_sig(thread);
#endif

            /* if it's a system object, do not delete it */
            if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE)
            {
                /* unlock the scheduler */
                rt_exit_critical();

                /* enable interrupt */
                rt_hw_interrupt_enable(lock);

                return;
            }

            /* unlock the scheduler */
            rt_exit_critical();
        }
        else
        {
            /* enable interrupt */
            rt_hw_interrupt_enable(lock);

            /* the defunct thread may have been removed by others; just return */
            return;
        }

        /* enable interrupt */
        rt_hw_interrupt_enable(lock);

#ifdef RT_USING_HEAP
        /* release the thread's stack */
        RT_KERNEL_FREE(thread->stack_addr);
        /* delete the thread object */
        rt_object_delete((rt_object_t)thread);
#endif
    }
}
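
/*
 * Usage sketch: applications normally never call rt_thread_idle_excute()
 * directly; the idle thread below invokes it in its loop. Its effect is
 * visible when a dynamically created thread exits, as in this hypothetical
 * example, where the thread's stack and object are reclaimed here:
 *
 *     rt_thread_t tid = rt_thread_create("work", work_entry, RT_NULL,
 *                                        1024, 20, 10);
 *     if (tid != RT_NULL)
 *         rt_thread_startup(tid);
 *     // once work_entry returns, the thread is moved to rt_thread_defunct
 *     // and is cleaned up the next time the idle thread runs
 */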

static void rt_thread_idle_entry(void *parameter)
{
#ifdef RT_USING_SMP
    if (rt_hw_cpu_id() != 0)
    {
        while (1)
        {
            rt_hw_secondary_cpu_idle_exec();
        }
    }
#endif

    while (1)
    {
#ifdef RT_USING_IDLE_HOOK
        rt_size_t i;

        for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++)
        {
            if (idle_hook_list[i] != RT_NULL)
            {
                idle_hook_list[i]();
            }
        }
#endif

        rt_thread_idle_excute();
    }
}

/**
 * @ingroup SystemInit
 *
 * This function will initialize the idle thread and then start it.
 *
 * @note this function must be invoked during system initialization.
 */
void rt_thread_idle_init(void)
{
    rt_ubase_t i;
    char tidle_name[RT_NAME_MAX];

    for (i = 0; i < _CPUS_NR; i++)
    {
        rt_sprintf(tidle_name, "tidle%d", i);
        rt_thread_init(&idle[i],
                tidle_name,
                rt_thread_idle_entry,
                RT_NULL,
                &rt_thread_stack[i][0],
                sizeof(rt_thread_stack[i]),
                RT_THREAD_PRIORITY_MAX - 1,
                32);
#ifdef RT_USING_SMP
        rt_thread_control(&idle[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
#endif
        /* startup */
        rt_thread_startup(&idle[i]);
    }
}
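
/*
 * Usage sketch: this is expected to be called exactly once from the kernel
 * startup path (e.g. rtthread_startup()) before the scheduler starts, roughly:
 *
 *     rt_system_scheduler_init();
 *     ...
 *     rt_thread_idle_init();
 *     rt_system_scheduler_start();
 *
 * Application code should not call it again.
 */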

/**
 * @ingroup Thread
 *
 * This function will get the handler of the idle thread.
 */
rt_thread_t rt_thread_idle_gethandler(void)
{
#ifdef RT_USING_SMP
    register int id = rt_hw_cpu_id();
#else
    register int id = 0;
#endif

    return (rt_thread_t)(&idle[id]);
}
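
/*
 * Usage sketch for rt_thread_idle_gethandler(): the returned handle is a
 * normal rt_thread_t and can be inspected like any other thread, e.g.
 *
 *     rt_thread_t tid = rt_thread_idle_gethandler();
 *     rt_kprintf("idle thread on this cpu: %s\n", tid->name);
 */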