Lines matching full:tmc (identifier search; the matches below are from kernel/time/timer_migration.c)
428 static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc) in tmigr_is_not_available() argument
430 return !(tmc->tmgroup && tmc->online); in tmigr_is_not_available()
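The predicate is visible in full in these two matches: a CPU takes part in timer migration only once it is linked into a hierarchy group and marked online. Every public entry point in the matches below uses it as an early-out guard. A minimal self-contained illustration of that guard idiom (stand-in names, not the kernel's types):

    #include <stdbool.h>

    struct tmc_like {
        void *tmgroup;  /* NULL until the CPU is linked into a group */
        bool online;
    };

    static inline bool is_not_available(struct tmc_like *tmc)
    {
        /* unavailable until linked into the hierarchy AND marked online */
        return !(tmc->tmgroup && tmc->online);
    }

    static void entry_point(struct tmc_like *tmc)
    {
        if (is_not_available(tmc))
            return;     /* early out, as in tmigr_cpu_activate() and friends */
        /* ... hierarchy work ... */
    }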
526 struct tmigr_cpu *tmc) in __walk_groups() argument
528 struct tmigr_group *child = NULL, *group = tmc->tmgroup; in __walk_groups()
547 static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc) in walk_groups() argument
549 lockdep_assert_held(&tmc->lock); in walk_groups()
551 __walk_groups(up, data, tmc); in walk_groups()
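walk_groups() asserts the per-CPU lock and delegates to __walk_groups(), whose locals (child starting at NULL, group starting at tmc->tmgroup) suggest a climb from the CPU's own group toward the root, invoking the up_f callback once per level. A sketch of that plausible loop shape; the ->parent pointer and the convention that the callback returns true to stop the walk are assumptions, not read from the elided body:

    #include <stdbool.h>
    #include <stddef.h>

    struct grp {
        struct grp *parent;     /* assumption: groups form a tree */
    };

    typedef bool (*up_f)(struct grp *group, struct grp *child, void *data);

    /* Sketch of the upward walk: start at the CPU's own group, call the
       per-level callback, stop when it returns true or the root is passed. */
    static void walk_up(up_f up, void *data, struct grp *start)
    {
        struct grp *child = NULL, *group = start;

        do {
            if (up(group, child, data))
                break;              /* callback: propagation is complete */
            child = group;
            group = group->parent;  /* climb one level */
        } while (group);
    }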
673 static void __tmigr_cpu_activate(struct tmigr_cpu *tmc) in __tmigr_cpu_activate() argument
677 data.childmask = tmc->groupmask; in __tmigr_cpu_activate()
679 trace_tmigr_cpu_active(tmc); in __tmigr_cpu_activate()
681 tmc->cpuevt.ignore = true; in __tmigr_cpu_activate()
682 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in __tmigr_cpu_activate()
684 walk_groups(&tmigr_active_up, &data, tmc); in __tmigr_cpu_activate()
694 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_activate() local
696 if (tmigr_is_not_available(tmc)) in tmigr_cpu_activate()
699 if (WARN_ON_ONCE(!tmc->idle)) in tmigr_cpu_activate()
702 raw_spin_lock(&tmc->lock); in tmigr_cpu_activate()
703 tmc->idle = false; in tmigr_cpu_activate()
704 __tmigr_cpu_activate(tmc); in tmigr_cpu_activate()
705 raw_spin_unlock(&tmc->lock); in tmigr_cpu_activate()
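tmigr_cpu_activate() shows the common shape of these entry points: bail out if unavailable, sanity-check the expected state, then mutate per-CPU state and propagate it up the hierarchy while holding tmc->lock. A userspace analogue of that locked flip-and-propagate sequence, with a pthread mutex standing in for the raw spinlock (sketch only):

    #include <pthread.h>
    #include <stdbool.h>

    struct cpu_state {
        pthread_mutex_t lock;
        bool idle;
    };

    static void propagate_active(struct cpu_state *c)
    {
        (void)c;    /* stand-in for walk_groups(&tmigr_active_up, ...) */
    }

    static void cpu_activate(struct cpu_state *c)
    {
        pthread_mutex_lock(&c->lock);
        c->idle = false;        /* CPU resumes migrator duty */
        propagate_active(c);    /* propagate while still holding the lock */
        pthread_mutex_unlock(&c->lock);
    }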
881 static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp) in tmigr_new_timer() argument
885 .evt = &tmc->cpuevt }; in tmigr_new_timer()
887 lockdep_assert_held(&tmc->lock); in tmigr_new_timer()
889 if (tmc->remote) in tmigr_new_timer()
892 trace_tmigr_cpu_new_timer(tmc); in tmigr_new_timer()
894 tmc->cpuevt.ignore = false; in tmigr_new_timer()
897 walk_groups(&tmigr_new_timer_up, &data, tmc); in tmigr_new_timer()
908 struct tmigr_cpu *tmc; in tmigr_handle_remote_cpu() local
910 tmc = per_cpu_ptr(&tmigr_cpu, cpu); in tmigr_handle_remote_cpu()
912 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
929 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore || in tmigr_handle_remote_cpu()
930 now < tmc->cpuevt.nextevt.expires) { in tmigr_handle_remote_cpu()
931 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
935 trace_tmigr_handle_remote_cpu(tmc); in tmigr_handle_remote_cpu()
937 tmc->remote = true; in tmigr_handle_remote_cpu()
938 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_handle_remote_cpu()
941 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
949 * the top). While fetching the next timer interrupt, tmc->lock also in tmigr_handle_remote_cpu()
963 raw_spin_lock(&tmc->lock); in tmigr_handle_remote_cpu()
976 if (!tmc->online || !tmc->idle) { in tmigr_handle_remote_cpu()
987 data.evt = &tmc->cpuevt; in tmigr_handle_remote_cpu()
995 walk_groups(&tmigr_new_timer_up, &data, tmc); in tmigr_handle_remote_cpu()
998 tmc->remote = false; in tmigr_handle_remote_cpu()
999 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
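tmigr_handle_remote_cpu() cannot hold tmc->lock while expiring another CPU's timers (the comment fragment about lock ordering hints at why), so it claims the CPU by setting tmc->remote under the lock, drops the lock for the actual expiry work, and re-takes it afterwards to requeue the event and clear the flag. A minimal userspace analogue of that claim/drop/republish dance, under the assumption that tmc->remote is exactly such a claim flag:

    #include <pthread.h>
    #include <stdbool.h>

    struct remote_state {
        pthread_mutex_t lock;
        bool remote;    /* set while another CPU handles our timers */
    };

    static void handle_remote(struct remote_state *s)
    {
        pthread_mutex_lock(&s->lock);
        if (s->remote) {    /* someone already claimed this CPU's timers */
            pthread_mutex_unlock(&s->lock);
            return;
        }
        s->remote = true;   /* claim under the lock */
        pthread_mutex_unlock(&s->lock);

        /* ... expire the remote CPU's timers with the lock dropped ... */

        pthread_mutex_lock(&s->lock);
        s->remote = false;  /* republish; the event requeue happens here too */
        pthread_mutex_unlock(&s->lock);
    }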
1060 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_handle_remote() local
1063 if (tmigr_is_not_available(tmc)) in tmigr_handle_remote()
1066 data.childmask = tmc->groupmask; in tmigr_handle_remote()
1074 if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) { in tmigr_handle_remote()
1080 if (READ_ONCE(tmc->wakeup) == KTIME_MAX) in tmigr_handle_remote()
1087 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to in tmigr_handle_remote()
1088 * KTIME_MAX. Even if tmc->lock is not held during the whole remote in tmigr_handle_remote()
1089 * handling, a stale tmc->wakeup is acceptable, as this path runs in in tmigr_handle_remote()
1094 __walk_groups(&tmigr_handle_remote_up, &data, tmc); in tmigr_handle_remote()
1096 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote()
1097 WRITE_ONCE(tmc->wakeup, data.firstexp); in tmigr_handle_remote()
1098 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote()
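tmigr_handle_remote() reads tmc->wakeup locklessly with READ_ONCE() for the early-out and only takes tmc->lock at the end to publish the new value with WRITE_ONCE(); per the comment, a stale read is tolerated. A C11 analogue of that discipline, with relaxed atomic loads/stores standing in for READ_ONCE/WRITE_ONCE (sketch, not the kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define KTIME_MAX INT64_MAX

    struct wake_state {
        pthread_mutex_t lock;
        _Atomic int64_t wakeup;     /* next wakeup, KTIME_MAX when unarmed */
    };

    static void remote_handling(struct wake_state *s)
    {
        int64_t firstexp = KTIME_MAX;

        /* lockless early out: tolerates a stale value by design */
        if (atomic_load_explicit(&s->wakeup, memory_order_relaxed) == KTIME_MAX)
            return;

        /* ... walk the hierarchy without the lock, computing firstexp ... */

        pthread_mutex_lock(&s->lock);   /* publish only at the end */
        atomic_store_explicit(&s->wakeup, firstexp, memory_order_relaxed);
        pthread_mutex_unlock(&s->lock);
    }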
1158 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_requires_handle_remote() local
1163 if (tmigr_is_not_available(tmc)) in tmigr_requires_handle_remote()
1167 data.childmask = tmc->groupmask; in tmigr_requires_handle_remote()
1169 data.tmc_active = !tmc->idle; in tmigr_requires_handle_remote()
1176 * The check is done locklessly, as interrupts are disabled and @tmc->idle is in tmigr_requires_handle_remote()
1179 if (!tmc->idle) { in tmigr_requires_handle_remote()
1180 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc); in tmigr_requires_handle_remote()
1186 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock in tmigr_requires_handle_remote()
1192 if (data.now >= READ_ONCE(tmc->wakeup)) in tmigr_requires_handle_remote()
1195 raw_spin_lock(&tmc->lock); in tmigr_requires_handle_remote()
1196 if (data.now >= tmc->wakeup) in tmigr_requires_handle_remote()
1198 raw_spin_unlock(&tmc->lock); in tmigr_requires_handle_remote()
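tmigr_requires_handle_remote() ends with a classic double-checked test: compare data.now against a lockless READ_ONCE(tmc->wakeup) first, and only on a hit take the lock and re-check before committing. A sketch of that pattern, reusing struct wake_state from the previous snippet:

    static bool deadline_due(struct wake_state *s, int64_t now)
    {
        bool due = false;

        /* cheap lockless test first ... */
        if (now >= atomic_load_explicit(&s->wakeup, memory_order_relaxed)) {
            pthread_mutex_lock(&s->lock);
            /* ... then confirm under the lock before committing */
            if (now >= atomic_load_explicit(&s->wakeup, memory_order_relaxed))
                due = true;
            pthread_mutex_unlock(&s->lock);
        }
        return due;
    }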
1205 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1210 * and thereby the timer idle path is executed once more. @tmc->wakeup
1219 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_new_timer() local
1222 if (tmigr_is_not_available(tmc)) in tmigr_cpu_new_timer()
1225 raw_spin_lock(&tmc->lock); in tmigr_cpu_new_timer()
1227 ret = READ_ONCE(tmc->wakeup); in tmigr_cpu_new_timer()
1229 if (nextexp != tmc->cpuevt.nextevt.expires || in tmigr_cpu_new_timer()
1230 tmc->cpuevt.ignore) { in tmigr_cpu_new_timer()
1231 ret = tmigr_new_timer(tmc, nextexp); in tmigr_cpu_new_timer()
1236 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_new_timer()
1239 trace_tmigr_cpu_new_timer_idle(tmc, nextexp); in tmigr_cpu_new_timer()
1240 raw_spin_unlock(&tmc->lock); in tmigr_cpu_new_timer()
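On the idle path, tmigr_cpu_new_timer() requeues into the hierarchy only when something changed: a different expiry than the queued event, or an event currently flagged ignore. Otherwise the cached tmc->wakeup is returned untouched. A self-contained sketch of that requeue-only-on-change logic; the helper and parameter names are hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    struct cpu_evt {
        int64_t expires;    /* expiry of the queued per-CPU event */
        bool ignore;        /* event currently not armed */
    };

    static int64_t maybe_requeue(struct cpu_evt *evt, int64_t nextexp,
                                 int64_t cached_wakeup,
                                 int64_t (*requeue)(struct cpu_evt *, int64_t))
    {
        if (nextexp != evt->expires || evt->ignore)
            return requeue(evt, nextexp);   /* hierarchy walk happens here */
        return cached_wakeup;   /* nothing changed: keep the stored wakeup */
    }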
1321 static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp) in __tmigr_cpu_deactivate() argument
1325 .evt = &tmc->cpuevt, in __tmigr_cpu_deactivate()
1326 .childmask = tmc->groupmask }; in __tmigr_cpu_deactivate()
1334 tmc->cpuevt.ignore = false; in __tmigr_cpu_deactivate()
1336 walk_groups(&tmigr_inactive_up, &data, tmc); in __tmigr_cpu_deactivate()
1352 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_deactivate() local
1355 if (tmigr_is_not_available(tmc)) in tmigr_cpu_deactivate()
1358 raw_spin_lock(&tmc->lock); in tmigr_cpu_deactivate()
1360 ret = __tmigr_cpu_deactivate(tmc, nextexp); in tmigr_cpu_deactivate()
1362 tmc->idle = true; in tmigr_cpu_deactivate()
1368 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_deactivate()
1370 trace_tmigr_cpu_idle(tmc, nextexp); in tmigr_cpu_deactivate()
1371 raw_spin_unlock(&tmc->lock); in tmigr_cpu_deactivate()
1395 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_quick_check() local
1396 struct tmigr_group *group = tmc->tmgroup; in tmigr_quick_check()
1398 if (tmigr_is_not_available(tmc)) in tmigr_quick_check()
1401 if (WARN_ON_ONCE(tmc->idle)) in tmigr_quick_check()
1404 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask)) in tmigr_quick_check()
1436 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_trigger_active() local
1438 WARN_ON_ONCE(!tmc->online || tmc->idle); in tmigr_trigger_active()
1445 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_offline() local
1449 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_offline()
1450 tmc->online = false; in tmigr_cpu_offline()
1451 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_offline()
1457 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX); in tmigr_cpu_offline()
1458 trace_tmigr_cpu_offline(tmc); in tmigr_cpu_offline()
1459 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_offline()
1471 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_online() local
1474 if (WARN_ON_ONCE(!tmc->tmgroup)) in tmigr_cpu_online()
1477 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_online()
1478 trace_tmigr_cpu_online(tmc); in tmigr_cpu_online()
1479 tmc->idle = timer_base_is_idle(); in tmigr_cpu_online()
1480 if (!tmc->idle) in tmigr_cpu_online()
1481 __tmigr_cpu_activate(tmc); in tmigr_cpu_online()
1482 tmc->online = true; in tmigr_cpu_online()
1483 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_online()
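tmigr_cpu_online() and tmigr_cpu_offline() have the shape of CPU hotplug callbacks: offline deactivates the CPU and parks tmc->wakeup at KTIME_MAX, online re-activates it unless the timer base is idle. They are plausibly registered through the cpuhp state machine along the following lines; the state constant and this registration site are assumptions, not taken from the matches:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    /* Sketch: wiring the callbacks into the hotplug state machine. The
       CPUHP_AP_TMIGR_ONLINE constant is an assumption here. */
    static int __init tmigr_hotplug_sketch(void)
    {
        return cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
                                 tmigr_cpu_online, tmigr_cpu_offline);
    }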
1693 * Update tmc -> group / child -> group connection in tmigr_setup_groups()
1696 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu); in tmigr_setup_groups() local
1700 tmc->tmgroup = group; in tmigr_setup_groups()
1701 tmc->groupmask = BIT(group->num_children++); in tmigr_setup_groups()
1705 trace_tmigr_connect_cpu_parent(tmc); in tmigr_setup_groups()
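tmigr_setup_groups() hands each new child one unique bit of the parent group's mask via BIT(group->num_children++), so per-group child state packs into a single word. A runnable illustration of how the masks come out:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        unsigned int num_children = 0;

        /* each child claims the next free bit, exactly like
           tmc->groupmask = BIT(group->num_children++) */
        for (int cpu = 0; cpu < 4; cpu++)
            printf("cpu %d -> groupmask 0x%x\n", cpu, BIT(num_children++));
        return 0;   /* prints 0x1, 0x2, 0x4, 0x8 */
    }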
1766 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu); in tmigr_cpu_prepare() local
1770 if (tmc->tmgroup) in tmigr_cpu_prepare()
1773 raw_spin_lock_init(&tmc->lock); in tmigr_cpu_prepare()
1774 timerqueue_init(&tmc->cpuevt.nextevt); in tmigr_cpu_prepare()
1775 tmc->cpuevt.nextevt.expires = KTIME_MAX; in tmigr_cpu_prepare()
1776 tmc->cpuevt.ignore = true; in tmigr_cpu_prepare()
1777 tmc->cpuevt.cpu = cpu; in tmigr_cpu_prepare()
1778 tmc->remote = false; in tmigr_cpu_prepare()
1779 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_prepare()
1785 if (tmc->groupmask == 0) in tmigr_cpu_prepare()
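The matches from tmigr_cpu_prepare() outline a one-time per-CPU init: skip if tmc->tmgroup is already set, then initialize the lock, the timerqueue node of the CPU event, and the defaults (expiry KTIME_MAX, ignore set, owning cpu recorded, remote cleared, wakeup parked). A kernel-style sketch that stitches those visible lines together; the control flow around them (the early return and the error handling behind the final groupmask check) is an assumption:

    #include <linux/ktime.h>
    #include <linux/spinlock.h>
    #include <linux/timerqueue.h>

    /* Sketch stitched from the matched lines; assumes it sits next to the
       real struct tmigr_cpu definition. */
    static int tmigr_cpu_prepare_sketch(unsigned int cpu)
    {
        struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);

        if (tmc->tmgroup)       /* already prepared on an earlier hotplug */
            return 0;

        raw_spin_lock_init(&tmc->lock);
        timerqueue_init(&tmc->cpuevt.nextevt);   /* detached rb-tree node */
        tmc->cpuevt.nextevt.expires = KTIME_MAX; /* nothing queued yet */
        tmc->cpuevt.ignore = true;               /* event not armed */
        tmc->cpuevt.cpu = cpu;
        tmc->remote = false;
        WRITE_ONCE(tmc->wakeup, KTIME_MAX);

        /* ... group connection elided; the groupmask == 0 test above
           suggests an error return when no group could be attached ... */
        return 0;
    }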