Lines matching "+full:data +full:- +full:active" in kernel/time/timer_migration.c

1 // SPDX-License-Identifier: GPL-2.0-only
15 #include "tick-internal.h"
34 * GRP0:0 - GRP0:2 GRP0:3 - GRP0:5
37 * CPUS 0-7 8-15 16-23 24-31 32-39 40-47
44 * active in the group. This designated role is necessary to prevent all
45 * active CPUs in a group from trying to migrate expired timers from other CPUs,
50 * no CPU is active, it also checks the groups where no migrator is set
65 * When a CPU comes out of idle and when a group has at least a single active
69 * time. This spares locking in the active path as the lock protects (after
74 * the next active CPU in the group or sets migrator to TMIGR_NONE when
75 * there is no active CPU in the group. This delegation needs to be
86 * ---------------
96 * --------------
101 * active CPU/group information, atomic_try_cmpxchg() is used instead and only
102 * the per CPU tmigr_cpu->lock is held.
107 * When @timer_base->lock as well as tmigr related locks are required, the lock
108 * ordering is: first @timer_base->lock, afterwards tmigr related locks.
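
The ordering spelled out above (timer_base->lock first, then the tmigr locks, and within tmigr a child group's lock before its parent's, as tmigr_update_events() does with raw_spin_lock_nested()) can be sketched with plain POSIX mutexes; the lock names below are illustrative stand-ins, not the kernel's objects:

#include <pthread.h>

/* Illustrative stand-ins, not the kernel's data structures. */
static pthread_mutex_t base_lock   = PTHREAD_MUTEX_INITIALIZER; /* timer_base->lock      */
static pthread_mutex_t child_lock  = PTHREAD_MUTEX_INITIALIZER; /* child group's ->lock  */
static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER; /* parent group's ->lock */

static void locked_update(void)
{
	pthread_mutex_lock(&base_lock);		/* 1. always first: timer_base->lock     */
	pthread_mutex_lock(&child_lock);	/* 2. tmigr locks bottom up: child ...   */
	pthread_mutex_lock(&parent_lock);	/* 3. ... then parent (raw_spin_lock_nested
						 *    in the kernel to annotate the nesting
						 *    for lockdep)                        */

	/* ... update group event queues / state here ... */

	pthread_mutex_unlock(&parent_lock);
	pthread_mutex_unlock(&child_lock);
	pthread_mutex_unlock(&base_lock);
}
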
112 * ------------------------------------------------
114 * The state information with the list of active children and migrator needs to
125 * active = GRP0:0, GRP0:1
129 * active = CPU0 active = CPU2
132 * active idle active idle
141 * active = GRP0:0, GRP0:1
144 * --> migrator = TMIGR_NONE migrator = CPU2
145 * --> active = active = CPU2
148 * --> idle idle active idle
158 * active = GRP0:0, GRP0:1
161 * --> migrator = CPU1 migrator = CPU2
162 * --> active = CPU1 active = CPU2
165 * idle --> active active idle
169 * active members of GRP1:0 remain unchanged after the update since it is
173 * --> migrator = GRP0:1
174 * --> active = GRP0:0, GRP0:1
178 * active = CPU1 active = CPU2
181 * idle active active idle
186 * --> migrator = GRP0:1
187 * --> active = GRP0:1
191 * active = CPU1 active = CPU2
194 * idle active active idle
198 * active and is correctly listed as active in GRP0:0. However GRP1:0 does not
199 * have GRP0:0 listed as active, which is wrong. The sequence counter has been
202 * expected value (compare-and-exchange).
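
The reason the stale update above cannot commit is that group->migr_state is a single word which also carries a sequence count; each successful update changes it, so a CPU operating on an outdated snapshot fails the compare-and-exchange and must re-read. A minimal userspace sketch of that retry pattern, using C11 atomics in place of atomic_try_cmpxchg(); the field layout only approximates the kernel's union tmigr_state:

#include <stdatomic.h>
#include <stdint.h>

/* Approximation of union tmigr_state: active bitmap, migrator, sequence. */
union grp_state {
	uint32_t word;
	struct {
		uint8_t  active;	/* bitmap of active children                  */
		uint8_t  migrator;	/* child mask of the migrator, or MIGR_NONE   */
		uint16_t seq;		/* changes on every successful update         */
	};
};

#define MIGR_NONE 0xff	/* stand-in for TMIGR_NONE */

/* Mark one child active; retry until no concurrent update interfered. */
static void child_active(_Atomic uint32_t *state, uint8_t childmask)
{
	union grp_state curstate = { .word = atomic_load(state) }, newstate;

	do {
		newstate = curstate;
		if (newstate.migrator == MIGR_NONE)
			newstate.migrator = childmask;	/* first active child becomes migrator */
		newstate.active |= childmask;
		newstate.seq++;				/* makes stale snapshots fail the CAS  */
	} while (!atomic_compare_exchange_weak(state, &curstate.word, newstate.word));
}
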
210 * ----------------------------------------------------------
217 * 1. Only CPU2 is active:
221 * active = GRP0:1
226 * active = active = CPU2
230 * idle idle active idle
237 * active = GRP0:1
241 * migrator = TMIGR_NONE --> migrator = TMIGR_NONE
242 * active = --> active =
246 * idle idle --> idle idle
254 * --> migrator = TMIGR_NONE
255 * --> active =
260 * active = active =
264 * idle idle --> idle idle
271 * active =
276 * active = active =
277 * --> next_expiry = TIMER0 next_expiry = KTIME_MAX
282 * 5. GRP0:0 is not active, so the new timer has to be propagated to
285 * handed back to CPU0, as it seems that there is still an active child in
290 * active =
291 * --> next_expiry = TIMER0
295 * active = active =
305 * -------------------------- ---------------------------
307 * cmpxchg(&GRP1:0->state);
309 * spin_lock(&GRP1:0->lock);
312 * spin_unlock(&GRP1:0->lock);
316 * spin_lock(&GRP1:0->lock)
318 * group_state = atomic_read(&GRP1:0->state)
321 * spin_unlock(&GRP1:0->lock) <3>
328 * update of the group state from the active path is no problem, as the upcoming CPU
332 * -----------------------------------------------------------
341 * also idle and has no global timer pending. CPU2 is the only active CPU and
346 * active = GRP0:1
347 * --> timerqueue = evt-GRP0:0
351 * active = active = CPU2
354 * timerqueue = evt-CPU0, timerqueue =
355 * evt-CPU1
358 * idle idle active idle
368 * active = GRP0:1
369 * --> timerqueue =
373 * active = active = CPU2
375 * --> groupevt.cpu = CPU0 groupevt.cpu =
376 * timerqueue = evt-CPU0, timerqueue =
377 * evt-CPU1
380 * idle idle active idle
392 * active:
396 * active = GRP0:1
397 * --> timerqueue = evt-GRP0:0
401 * active = active = CPU2
403 * --> groupevt.cpu = CPU1 groupevt.cpu =
404 * --> timerqueue = evt-CPU1 timerqueue =
407 * idle idle active idle
413 * CPU of GRP0:0 is active again. The CPU will mark GRP0:0 active and take care
430 return !(tmc->tmgroup && tmc->online); in tmigr_is_not_available()
435 * group is not active - so no migrator is set.
441 s.state = atomic_read(&group->migr_state); in tmigr_check_migrator()
452 unsigned long active; in tmigr_check_migrator_and_lonely() local
455 s.state = atomic_read(&group->migr_state); in tmigr_check_migrator_and_lonely()
460 active = s.active; in tmigr_check_migrator_and_lonely()
461 lonely = bitmap_weight(&active, BIT_CNT) <= 1; in tmigr_check_migrator_and_lonely()
468 unsigned long active; in tmigr_check_lonely() local
471 s.state = atomic_read(&group->migr_state); in tmigr_check_lonely()
473 active = s.active; in tmigr_check_lonely()
475 return bitmap_weight(&active, BIT_CNT) <= 1; in tmigr_check_lonely()
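
Both checks above reduce to a population count over the active byte of the group state: a group is "lonely" when at most one of its children is active. A self-contained sketch of that predicate (the helper name is made up; the kernel uses bitmap_weight()):

#include <stdbool.h>
#include <stdint.h>

/*
 * True if at most one child of the group is active, i.e. the querying
 * CPU cannot expect a sibling to take over its expired global timers.
 */
static bool group_is_lonely(uint8_t active_bitmap)
{
	return __builtin_popcount(active_bitmap) <= 1;
}

For example, group_is_lonely(0x04) is true, while group_is_lonely(0x05) is false.
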
479 * struct tmigr_walk - data required for walking the hierarchy
525 static void __walk_groups(up_f up, struct tmigr_walk *data, in __walk_groups() argument
528 struct tmigr_group *child = NULL, *group = tmc->tmgroup; in __walk_groups()
531 WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels); in __walk_groups()
533 if (up(group, child, data)) in __walk_groups()
541 group = READ_ONCE(group->parent); in __walk_groups()
542 data->childmask = child->groupmask; in __walk_groups()
543 WARN_ON_ONCE(!data->childmask); in __walk_groups()
547 static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc) in walk_groups() argument
549 lockdep_assert_held(&tmc->lock); in walk_groups()
551 __walk_groups(up, data, tmc); in walk_groups()
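
walk_groups()/__walk_groups() apply an up_f handler (such as tmigr_active_up() or tmigr_new_timer_up()) level by level from the CPU's own group towards the root and stop as soon as the handler returns true. A simplified stand-alone sketch of that walk; struct grp, struct walk and walk_up are illustrative names rather than the kernel types:

#include <stdbool.h>

struct grp {
	struct grp   *parent;
	unsigned char groupmask;	/* bit of this group within its parent */
};

struct walk {
	unsigned char childmask;	/* bit of the child as seen by the current level */
	/* ... per-walk data such as nextexp, firstexp, evt ... */
};

typedef bool (*up_f)(struct grp *group, struct grp *child, struct walk *w);

static void walk_up(up_f up, struct walk *w, struct grp *leaf)
{
	struct grp *child = NULL, *group = leaf;

	do {
		/* The handler returns true when the walk may stop at this
		 * level, e.g. because an upper group is still active. */
		if (up(group, child, w))
			break;

		child = group;
		group = group->parent;
		if (group)
			w->childmask = child->groupmask;
	} while (group);
}
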
555 * Returns the next event of the timerqueue @group->events
565 lockdep_assert_held(&group->lock); in tmigr_next_groupevt()
567 WRITE_ONCE(group->next_expiry, KTIME_MAX); in tmigr_next_groupevt()
569 while ((node = timerqueue_getnext(&group->events))) { in tmigr_next_groupevt()
572 if (!READ_ONCE(evt->ignore)) { in tmigr_next_groupevt()
573 WRITE_ONCE(group->next_expiry, evt->nextevt.expires); in tmigr_next_groupevt()
581 if (!timerqueue_del(&group->events, node)) in tmigr_next_groupevt()
598 if (!evt || now < evt->nextevt.expires) in tmigr_next_expired_groupevt()
604 timerqueue_del(&group->events, &evt->nextevt); in tmigr_next_expired_groupevt()
619 return evt->nextevt.expires; in tmigr_next_groupevt_expires()
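
tmigr_next_groupevt() skips and removes queued events whose ignore flag is set and caches the first remaining expiry in group->next_expiry; tmigr_next_expired_groupevt() then hands an event out only if it is already due. A compact sketch of the same bookkeeping over a plain sorted array (struct evt and the helpers are illustrative stand-ins for the timerqueue API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define KTIME_MAX INT64_MAX

struct evt {
	int64_t expires;
	bool    ignore;		/* owner became active again; entry is stale */
};

/*
 * Return the first non-ignored event of a queue sorted by expiry, dropping
 * ignored entries from the front, and publish its expiry via *next_expiry
 * (KTIME_MAX when the queue ends up empty).
 */
static struct evt *next_groupevt(struct evt *q, size_t *len, int64_t *next_expiry)
{
	*next_expiry = KTIME_MAX;

	while (*len) {
		if (!q[0].ignore) {
			*next_expiry = q[0].expires;
			return &q[0];
		}
		/* Drop the ignored head. */
		for (size_t i = 1; i < *len; i++)
			q[i - 1] = q[i];
		(*len)--;
	}
	return NULL;
}

/* Hand the event out only if it is already due; the kernel additionally
 * removes the returned event from the queue at this point. */
static struct evt *next_expired_groupevt(struct evt *q, size_t *len, int64_t now)
{
	int64_t exp;
	struct evt *e = next_groupevt(q, len, &exp);

	return (e && now >= e->expires) ? e : NULL;
}
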
624 struct tmigr_walk *data) in tmigr_active_up() argument
630 childmask = data->childmask; in tmigr_active_up()
636 curstate.state = atomic_read(&group->migr_state); in tmigr_active_up()
649 newstate.active |= childmask; in tmigr_active_up()
652 } while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)); in tmigr_active_up()
657 * The group is active (again). The group event might still be queued in tmigr_active_up()
662 * The update of the ignore flag in the active path is done lockless. In in tmigr_active_up()
668 WRITE_ONCE(group->groupevt.ignore, true); in tmigr_active_up()
675 struct tmigr_walk data; in __tmigr_cpu_activate() local
677 data.childmask = tmc->groupmask; in __tmigr_cpu_activate()
681 tmc->cpuevt.ignore = true; in __tmigr_cpu_activate()
682 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in __tmigr_cpu_activate()
684 walk_groups(&tmigr_active_up, &data, tmc); in __tmigr_cpu_activate()
688 * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy
699 if (WARN_ON_ONCE(!tmc->idle)) in tmigr_cpu_activate()
702 raw_spin_lock(&tmc->lock); in tmigr_cpu_activate()
703 tmc->idle = false; in tmigr_cpu_activate()
705 raw_spin_unlock(&tmc->lock); in tmigr_cpu_activate()
711 * @data->firstexp is set to the expiry of the first global event of the (top level of
723 struct tmigr_walk *data) in tmigr_update_events() argument
727 bool remote = data->remote; in tmigr_update_events()
733 raw_spin_lock(&child->lock); in tmigr_update_events()
734 raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING); in tmigr_update_events()
736 childstate.state = atomic_read(&child->migr_state); in tmigr_update_events()
737 groupstate.state = atomic_read(&group->migr_state); in tmigr_update_events()
739 if (childstate.active) { in tmigr_update_events()
745 nextexp = child->next_expiry; in tmigr_update_events()
746 evt = &child->groupevt; in tmigr_update_events()
755 WRITE_ONCE(evt->ignore, ignore); in tmigr_update_events()
757 nextexp = data->nextexp; in tmigr_update_events()
759 first_childevt = evt = data->evt; in tmigr_update_events()
760 ignore = evt->ignore; in tmigr_update_events()
765 * already queued events in non-active groups (see section in tmigr_update_events()
772 * - When entering this path by tmigr_new_timer(), @evt->ignore in tmigr_update_events()
774 * - tmigr_inactive_up() takes care of the propagation by in tmigr_update_events()
786 if (ignore && !remote && group->parent) in tmigr_update_events()
789 raw_spin_lock(&group->lock); in tmigr_update_events()
792 groupstate.state = atomic_read(&group->migr_state); in tmigr_update_events()
799 if (timerqueue_node_queued(&evt->nextevt)) { in tmigr_update_events()
800 if ((evt->nextevt.expires == nextexp) && !ignore) { in tmigr_update_events()
802 evt->cpu = first_childevt->cpu; in tmigr_update_events()
806 if (!timerqueue_del(&group->events, &evt->nextevt)) in tmigr_update_events()
807 WRITE_ONCE(group->next_expiry, KTIME_MAX); in tmigr_update_events()
814 * the group is already active, there is no need to walk the in tmigr_update_events()
819 * is not active, walking the hierarchy is required to not miss in tmigr_update_events()
820 * an enqueued timer in the non-active group. The enqueued timer in tmigr_update_events()
824 if (!remote || groupstate.active) in tmigr_update_events()
827 evt->nextevt.expires = nextexp; in tmigr_update_events()
828 evt->cpu = first_childevt->cpu; in tmigr_update_events()
830 if (timerqueue_add(&group->events, &evt->nextevt)) in tmigr_update_events()
831 WRITE_ONCE(group->next_expiry, nextexp); in tmigr_update_events()
835 if (!group->parent && (groupstate.migrator == TMIGR_NONE)) { in tmigr_update_events()
841 * handled when the top level group is not active, is calculated in tmigr_update_events()
854 data->firstexp = tmigr_next_groupevt_expires(group); in tmigr_update_events()
861 raw_spin_unlock(&group->lock); in tmigr_update_events()
864 raw_spin_unlock(&child->lock); in tmigr_update_events()
871 struct tmigr_walk *data) in tmigr_new_timer_up() argument
873 return tmigr_update_events(group, child, data); in tmigr_new_timer_up()
878 * returned if an active CPU will handle all the timer migration hierarchy
883 struct tmigr_walk data = { .nextexp = nextexp, in tmigr_new_timer() local
885 .evt = &tmc->cpuevt }; in tmigr_new_timer()
887 lockdep_assert_held(&tmc->lock); in tmigr_new_timer()
889 if (tmc->remote) in tmigr_new_timer()
894 tmc->cpuevt.ignore = false; in tmigr_new_timer()
895 data.remote = false; in tmigr_new_timer()
897 walk_groups(&tmigr_new_timer_up, &data, tmc); in tmigr_new_timer()
900 return data.firstexp; in tmigr_new_timer()
907 struct tmigr_walk data; in tmigr_handle_remote_cpu() local
912 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
929 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore || in tmigr_handle_remote_cpu()
930 now < tmc->cpuevt.nextevt.expires) { in tmigr_handle_remote_cpu()
931 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
937 tmc->remote = true; in tmigr_handle_remote_cpu()
938 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_handle_remote_cpu()
941 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
947 * Lock ordering needs to be preserved - timer_base locks before tmigr in tmigr_handle_remote_cpu()
949 * the top). During fetching the next timer interrupt, also tmc->lock in tmigr_handle_remote_cpu()
963 raw_spin_lock(&tmc->lock); in tmigr_handle_remote_cpu()
976 if (!tmc->online || !tmc->idle) { in tmigr_handle_remote_cpu()
985 data.nextexp = tevt.global; in tmigr_handle_remote_cpu()
986 data.firstexp = KTIME_MAX; in tmigr_handle_remote_cpu()
987 data.evt = &tmc->cpuevt; in tmigr_handle_remote_cpu()
988 data.remote = true; in tmigr_handle_remote_cpu()
995 walk_groups(&tmigr_new_timer_up, &data, tmc); in tmigr_handle_remote_cpu()
998 tmc->remote = false; in tmigr_handle_remote_cpu()
999 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
1004 struct tmigr_walk *data) in tmigr_handle_remote_up() argument
1011 jif = data->basej; in tmigr_handle_remote_up()
1012 now = data->now; in tmigr_handle_remote_up()
1014 childmask = data->childmask; in tmigr_handle_remote_up()
1020 * group has no migrator. Otherwise the group is active and is in tmigr_handle_remote_up()
1026 raw_spin_lock_irq(&group->lock); in tmigr_handle_remote_up()
1031 unsigned int remote_cpu = evt->cpu; in tmigr_handle_remote_up()
1033 raw_spin_unlock_irq(&group->lock); in tmigr_handle_remote_up()
1043 * (group->next_expiry was updated by tmigr_next_expired_groupevt(), in tmigr_handle_remote_up()
1046 data->firstexp = group->next_expiry; in tmigr_handle_remote_up()
1048 raw_spin_unlock_irq(&group->lock); in tmigr_handle_remote_up()
1054 * tmigr_handle_remote() - Handle global timers of remote idle CPUs
1061 struct tmigr_walk data; in tmigr_handle_remote() local
1066 data.childmask = tmc->groupmask; in tmigr_handle_remote()
1067 data.firstexp = KTIME_MAX; in tmigr_handle_remote()
1074 if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) { in tmigr_handle_remote()
1080 if (READ_ONCE(tmc->wakeup) == KTIME_MAX) in tmigr_handle_remote()
1084 data.now = get_jiffies_update(&data.basej); in tmigr_handle_remote()
1087 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to in tmigr_handle_remote()
1088 * KTIME_MAX. Even if tmc->lock is not held during the whole remote in tmigr_handle_remote()
1089 * handling, tmc->wakeup is fine to be stale as it is called in in tmigr_handle_remote()
1094 __walk_groups(&tmigr_handle_remote_up, &data, tmc); in tmigr_handle_remote()
1096 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote()
1097 WRITE_ONCE(tmc->wakeup, data.firstexp); in tmigr_handle_remote()
1098 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote()
1103 struct tmigr_walk *data) in tmigr_requires_handle_remote_up() argument
1107 childmask = data->childmask; in tmigr_requires_handle_remote_up()
1111 * has no migrator. Otherwise the group is active and is handled by its in tmigr_requires_handle_remote_up()
1119 * hierarchy walk is not active, continue the walk to reach the top level in tmigr_requires_handle_remote_up()
1122 if (group->parent && !data->tmc_active) in tmigr_requires_handle_remote_up()
1132 data->firstexp = READ_ONCE(group->next_expiry); in tmigr_requires_handle_remote_up()
1133 if (data->now >= data->firstexp) { in tmigr_requires_handle_remote_up()
1134 data->check = true; in tmigr_requires_handle_remote_up()
1138 raw_spin_lock(&group->lock); in tmigr_requires_handle_remote_up()
1139 data->firstexp = group->next_expiry; in tmigr_requires_handle_remote_up()
1140 if (data->now >= group->next_expiry) { in tmigr_requires_handle_remote_up()
1141 data->check = true; in tmigr_requires_handle_remote_up()
1142 raw_spin_unlock(&group->lock); in tmigr_requires_handle_remote_up()
1145 raw_spin_unlock(&group->lock); in tmigr_requires_handle_remote_up()
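
The two branches above differ only in whether the 64-bit group->next_expiry is read under the group lock; the lockless READ_ONCE() path is only valid where such a load cannot be torn (the condition selecting between the branches is not part of this excerpt, presumably a 64-bit versus 32-bit distinction). A purely illustrative userspace sketch of a tear-free read with a C11 atomic:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Expiry published by writers holding the group lock, read lock-free by
 * the remote-check path. _Atomic guarantees the 64-bit load cannot tear. */
static _Atomic int64_t next_expiry = INT64_MAX;

static bool remote_expiry_due(int64_t now)
{
	int64_t firstexp = atomic_load_explicit(&next_expiry, memory_order_relaxed);

	return now >= firstexp;
}
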
1152 * tmigr_requires_handle_remote() - Check the need of remote timer handling
1159 struct tmigr_walk data; in tmigr_requires_handle_remote() local
1166 data.now = get_jiffies_update(&jif); in tmigr_requires_handle_remote()
1167 data.childmask = tmc->groupmask; in tmigr_requires_handle_remote()
1168 data.firstexp = KTIME_MAX; in tmigr_requires_handle_remote()
1169 data.tmc_active = !tmc->idle; in tmigr_requires_handle_remote()
1170 data.check = false; in tmigr_requires_handle_remote()
1173 * If the CPU is active, walk the hierarchy to check whether a remote in tmigr_requires_handle_remote()
1176 * The check is done lockless as interrupts are disabled and @tmc->idle is in tmigr_requires_handle_remote()
1179 if (!tmc->idle) { in tmigr_requires_handle_remote()
1180 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc); in tmigr_requires_handle_remote()
1182 return data.check; in tmigr_requires_handle_remote()
1186 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock in tmigr_requires_handle_remote()
1192 if (data.now >= READ_ONCE(tmc->wakeup)) in tmigr_requires_handle_remote()
1195 raw_spin_lock(&tmc->lock); in tmigr_requires_handle_remote()
1196 if (data.now >= tmc->wakeup) in tmigr_requires_handle_remote()
1198 raw_spin_unlock(&tmc->lock); in tmigr_requires_handle_remote()
1205 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1210 * and thereby the timer idle path is executed once more. @tmc->wakeup
1225 raw_spin_lock(&tmc->lock); in tmigr_cpu_new_timer()
1227 ret = READ_ONCE(tmc->wakeup); in tmigr_cpu_new_timer()
1229 if (nextexp != tmc->cpuevt.nextevt.expires || in tmigr_cpu_new_timer()
1230 tmc->cpuevt.ignore) { in tmigr_cpu_new_timer()
1236 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_new_timer()
1240 raw_spin_unlock(&tmc->lock); in tmigr_cpu_new_timer()
1246 struct tmigr_walk *data) in tmigr_inactive_up() argument
1252 childmask = data->childmask; in tmigr_inactive_up()
1261 curstate.state = atomic_read_acquire(&group->migr_state); in tmigr_inactive_up()
1265 childstate.state = atomic_read(&child->migr_state); in tmigr_inactive_up()
1270 /* Reset active bit when the child is no longer active */ in tmigr_inactive_up()
1271 if (!childstate.active) in tmigr_inactive_up()
1272 newstate.active &= ~childmask; in tmigr_inactive_up()
1279 if (!childstate.active) { in tmigr_inactive_up()
1280 unsigned long new_migr_bit, active = newstate.active; in tmigr_inactive_up() local
1282 new_migr_bit = find_first_bit(&active, BIT_CNT); in tmigr_inactive_up()
1297 WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active)); in tmigr_inactive_up()
1299 if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)) { in tmigr_inactive_up()
1313 data->remote = false; in tmigr_inactive_up()
1316 tmigr_update_events(group, child, data); in tmigr_inactive_up()
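
When the child that just went inactive was the group's migrator, the loop above picks the next migrator from whatever remains in the active bitmap, or TMIGR_NONE once the group is entirely idle (the find_first_bit() call). The selection itself is only a couple of lines; a stand-alone sketch with simplified types, where MIGR_NONE stands in for TMIGR_NONE:

#include <stdint.h>

#define MIGR_NONE 0xff	/* stand-in for TMIGR_NONE */

/*
 * Pick the new migrator after @childmask went inactive: the first child
 * still marked active, or none when the whole group is idle now. The
 * kernel only recomputes this when the departing child actually was the
 * group's migrator.
 */
static uint8_t next_migrator(uint8_t active, uint8_t childmask)
{
	active &= ~childmask;			/* clear the departing child's bit */

	return active ? (uint8_t)(1u << __builtin_ctz(active)) : MIGR_NONE;
}
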
1323 struct tmigr_walk data = { .nextexp = nextexp, in __tmigr_cpu_deactivate() local
1325 .evt = &tmc->cpuevt, in __tmigr_cpu_deactivate()
1326 .childmask = tmc->groupmask }; in __tmigr_cpu_deactivate()
1334 tmc->cpuevt.ignore = false; in __tmigr_cpu_deactivate()
1336 walk_groups(&tmigr_inactive_up, &data, tmc); in __tmigr_cpu_deactivate()
1337 return data.firstexp; in __tmigr_cpu_deactivate()
1341 * tmigr_cpu_deactivate() - Put current CPU into inactive state
1358 raw_spin_lock(&tmc->lock); in tmigr_cpu_deactivate()
1362 tmc->idle = true; in tmigr_cpu_deactivate()
1368 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_deactivate()
1371 raw_spin_unlock(&tmc->lock); in tmigr_cpu_deactivate()
1376 * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to
1381 * * KTIME_MAX - when it is probable that nothing has to be done (not
1384 * single group active on the way to top level)
1385 * * nextevt - when CPU is offline and has to handle timer on its own
1387 * child is active but @nextevt is before the lowest
1389 * * next_expiry - value of lowest expiry encountered while walking groups
1390 * if only a single child is active on each and @nextevt
1396 struct tmigr_group *group = tmc->tmgroup; in tmigr_quick_check()
1401 if (WARN_ON_ONCE(tmc->idle)) in tmigr_quick_check()
1404 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask)) in tmigr_quick_check()
1412 * Since the current CPU is active, events may not be sorted in tmigr_quick_check()
1417 nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry)); in tmigr_quick_check()
1418 if (!group->parent) in tmigr_quick_check()
1421 group = group->parent; in tmigr_quick_check()
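
The loop above walks from the CPU's own group to the root, folding each group's cached next_expiry into the forecast, so the caller ends up with the minimum of @nextevt and everything seen on the way up. A simplified stand-alone sketch of that folding step (struct lvl_group and quick_forecast are illustrative; the real tmigr_quick_check() additionally bails out early through the migrator/lonely checks shown above):

#include <stdint.h>

struct lvl_group {
	struct lvl_group *parent;
	int64_t           next_expiry;	/* cached first expiry of queued child events */
};

/*
 * Forecast of the first event this CPU may have to care about: the minimum
 * of its own next event and every ancestor group's cached expiry.
 */
static int64_t quick_forecast(struct lvl_group *group, int64_t nextevt)
{
	for (; group; group = group->parent) {
		int64_t exp = group->next_expiry;	/* READ_ONCE() in the kernel */

		if (exp < nextevt)
			nextevt = exp;
	}
	return nextevt;
}
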
1428 * tmigr_trigger_active() - trigger a CPU to become active again
1431 * last active CPU in the hierarchy is going offline. This ensures that
1432 * the other CPU is active and takes over the migrator duty.
1438 WARN_ON_ONCE(!tmc->online || tmc->idle); in tmigr_trigger_active()
1449 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_offline()
1450 tmc->online = false; in tmigr_cpu_offline()
1451 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_offline()
1459 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_offline()
1473 /* Check whether CPU data was successfully initialized */ in tmigr_cpu_online()
1474 if (WARN_ON_ONCE(!tmc->tmgroup)) in tmigr_cpu_online()
1475 return -EINVAL; in tmigr_cpu_online()
1477 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_online()
1479 tmc->idle = timer_base_is_idle(); in tmigr_cpu_online()
1480 if (!tmc->idle) in tmigr_cpu_online()
1482 tmc->online = true; in tmigr_cpu_online()
1483 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_online()
1492 raw_spin_lock_init(&group->lock); in tmigr_init_group()
1494 group->level = lvl; in tmigr_init_group()
1495 group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE; in tmigr_init_group()
1497 group->num_children = 0; in tmigr_init_group()
1500 s.active = 0; in tmigr_init_group()
1502 atomic_set(&group->migr_state, s.state); in tmigr_init_group()
1505 * If this is a new top-level, prepare its groupmask in advance. in tmigr_init_group()
1506 * This avoids accidents where yet another new top-level is in tmigr_init_group()
1510 group->groupmask = BIT(0); in tmigr_init_group()
1516 group->num_children = 1; in tmigr_init_group()
1519 timerqueue_init_head(&group->events); in tmigr_init_group()
1520 timerqueue_init(&group->groupevt.nextevt); in tmigr_init_group()
1521 group->groupevt.nextevt.expires = KTIME_MAX; in tmigr_init_group()
1522 WRITE_ONCE(group->next_expiry, KTIME_MAX); in tmigr_init_group()
1523 group->groupevt.ignore = true; in tmigr_init_group()
1539 if (lvl < tmigr_crossnode_level && tmp->numa_node != node) in tmigr_get_group()
1543 if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP) in tmigr_get_group()
1563 return ERR_PTR(-ENOMEM); in tmigr_get_group()
1568 list_add(&group->list, &tmigr_level_list[lvl]); in tmigr_get_group()
1577 struct tmigr_walk data; in tmigr_connect_child_parent() local
1579 raw_spin_lock_irq(&child->lock); in tmigr_connect_child_parent()
1580 raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); in tmigr_connect_child_parent()
1585 * case groupmask is pre-initialized and @child already in tmigr_connect_child_parent()
1589 WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2); in tmigr_connect_child_parent()
1592 child->groupmask = BIT(parent->num_children++); in tmigr_connect_child_parent()
1600 smp_store_release(&child->parent, parent); in tmigr_connect_child_parent()
1602 raw_spin_unlock(&parent->lock); in tmigr_connect_child_parent()
1603 raw_spin_unlock_irq(&child->lock); in tmigr_connect_child_parent()
1611 * To prevent inconsistent states, active children need to be active in in tmigr_connect_child_parent()
1617 * top level), then they are not active. They will be set active when in tmigr_connect_child_parent()
1618 * the new online CPU becomes active. in tmigr_connect_child_parent()
1621 * mandatory to propagate the active state of the already existing in tmigr_connect_child_parent()
1626 * * It is ensured that the child is active, as this setup path is in tmigr_connect_child_parent()
1632 * @child. Therefore, propagate the active state unconditionally. in tmigr_connect_child_parent()
1634 data.childmask = child->groupmask; in tmigr_connect_child_parent()
1639 * child active when the parent is inactive, the parent needs to be the in tmigr_connect_child_parent()
1642 WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent); in tmigr_connect_child_parent()
1653 return -ENOMEM; in tmigr_setup_groups()
1673 if (group->parent || list_is_singular(&tmigr_level_list[i - 1])) in tmigr_setup_groups()
1679 WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top])); in tmigr_setup_groups()
1682 group = stack[--i]; in tmigr_setup_groups()
1685 list_del(&group->list); in tmigr_setup_groups()
1690 WARN_ON_ONCE(i != group->level); in tmigr_setup_groups()
1693 * Update tmc -> group / child -> group connection in tmigr_setup_groups()
1698 raw_spin_lock_irq(&group->lock); in tmigr_setup_groups()
1700 tmc->tmgroup = group; in tmigr_setup_groups()
1701 tmc->groupmask = BIT(group->num_children++); in tmigr_setup_groups()
1703 raw_spin_unlock_irq(&group->lock); in tmigr_setup_groups()
1710 child = stack[i - 1]; in tmigr_setup_groups()
1725 * CPU's child group and pre-accounted the old root. in tmigr_setup_groups()
1727 if (group->num_children == 2 && list_is_singular(lvllist)) { in tmigr_setup_groups()
1733 * active or not) and/or release an uninitialized childmask. in tmigr_setup_groups()
1737 lvllist = &tmigr_level_list[top - 1]; in tmigr_setup_groups()
1739 if (child->parent) in tmigr_setup_groups()
1770 if (tmc->tmgroup) in tmigr_cpu_prepare()
1773 raw_spin_lock_init(&tmc->lock); in tmigr_cpu_prepare()
1774 timerqueue_init(&tmc->cpuevt.nextevt); in tmigr_cpu_prepare()
1775 tmc->cpuevt.nextevt.expires = KTIME_MAX; in tmigr_cpu_prepare()
1776 tmc->cpuevt.ignore = true; in tmigr_cpu_prepare()
1777 tmc->cpuevt.cpu = cpu; in tmigr_cpu_prepare()
1778 tmc->remote = false; in tmigr_cpu_prepare()
1779 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_prepare()
1785 if (tmc->groupmask == 0) in tmigr_cpu_prepare()
1786 return -EINVAL; in tmigr_cpu_prepare()
1796 int ret = -ENOMEM; in tmigr_init()