/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_SMP

static struct rt_cpu rt_cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;

/**
 * This function will return the current cpu.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &rt_cpus[rt_hw_cpu_id()];
}

struct rt_cpu *rt_cpu_index(int index)
{
    return &rt_cpus[index];
}

/**
 * This function will lock all cpus' scheduler and disable the local irq.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu* pcpu;

    level = rt_hw_local_irq_disable();

    pcpu = rt_cpu_self();
    if (pcpu->current_thread != RT_NULL)
    {
        /* take the global spin lock only on the first nesting level */
        if (pcpu->current_thread->cpus_lock_nest++ == 0)
        {
            pcpu->current_thread->scheduler_lock_nest++;
            rt_hw_spin_lock(&_cpus_lock);
        }
    }
    return level;
}
RTM_EXPORT(rt_cpus_lock);

/**
 * This function will unlock all cpus' scheduler and restore the local irq.
 */
void rt_cpus_unlock(rt_base_t level)
{
    struct rt_cpu* pcpu = rt_cpu_self();

    if (pcpu->current_thread != RT_NULL)
    {
        /* release the global spin lock when the last nesting level is left */
        if (--pcpu->current_thread->cpus_lock_nest == 0)
        {
            pcpu->current_thread->scheduler_lock_nest--;
            rt_hw_spin_unlock(&_cpus_lock);
        }
    }
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
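/*
 * A minimal usage sketch (the caller below is hypothetical, not part of this
 * file): rt_cpus_lock()/rt_cpus_unlock() nest per thread, so a routine that
 * already holds the lock may safely call another routine that takes it again.
 *
 *     static void touch_shared_state(void)
 *     {
 *         rt_base_t level;
 *
 *         level = rt_cpus_lock();    // all cpus' scheduler locked, local irq off
 *         // ... modify data shared across cpus ...
 *         rt_cpus_unlock(level);     // drop one nesting level, restore local irq
 *     }
 */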
/**
 * This function is invoked by the scheduler.
 * It restores the lock state to whatever the thread's counter expects:
 * if the target thread does not hold the cpus lock, the lock is released.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
    struct rt_cpu* pcpu = rt_cpu_self();

    pcpu->current_thread = thread;
    if (!thread->cpus_lock_nest)
    {
        rt_hw_spin_unlock(&_cpus_lock);
    }
}
RTM_EXPORT(rt_cpus_lock_status_restore);

#endif /* RT_USING_SMP */
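/*
 * A hedged sketch of the expected rt_cpus_lock_status_restore() call site: the
 * architecture context-switch path (the lines below are illustrative only, not
 * the actual RT-Thread port code) invokes it for the incoming thread, so that
 * _cpus_lock ownership follows each thread's own cpus_lock_nest counter:
 *
 *     // after the scheduler picks to_thread and switches to its stack:
 *     rt_cpus_lock_status_restore(to_thread);
 *     // if to_thread holds no nested cpus lock, _cpus_lock is released here
 */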