Lines Matching +full:idle +full:- +full:wait +full:- +full:delay
1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
14 * For detailed explanation of Read-Copy Update mechanism see -
44 #include <linux/wait.h>
48 #include <linux/delay.h>
67 #include "../time/tick-internal.h"
86 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
110 /* Control rcu_node-tree auto-balancing at boot time. */
129 * boot-time false positives from lockdep-RCU error checking. Finally, it
142 * currently delay invocation of any RCU callbacks until after this point.
164 * real-time priority (enabling/disabling) is controlled by
170 /* Delay in jiffies for grace-period initialization delays, debug only. */
182 // Add delay to rcu_read_unlock() for strict grace periods.
197 * the delay. The longer the delay, the more grace periods elapse between
198 * each delay. The reason for this normalization is that it means that,
199 * for non-zero delays, the overall slowdown of grace periods is constant
200 * regardless of the duration of the delay. This arrangement balances
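A quick worked example of the normalization described above, based on the gating expression in rcu_gp_slow() further down in this listing (the specific values are purely illustrative): a delay of d jiffies is injected only once every rcu_num_nodes * PER_RCU_NODE_PERIOD * d grace periods. Assuming, say, rcu_num_nodes = 5 and PER_RCU_NODE_PERIOD = 3, a 10-jiffy delay fires once per 150 grace periods and a 1-jiffy delay once per 15, so either choice adds roughly 1/15 of a jiffy per grace period on average, which is why the overall slowdown does not depend on the delay chosen.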
209 * structure's ->lock, but of course results can be subject to change.
224 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
225 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
230 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
233 * This is a special-purpose function to be used in the softirq
234 * infrastructure and perhaps the occasional long-running softirq
248 * call to do_something() would be guaranteed to wait only until
250 * that same synchronize_rcu() would instead be guaranteed to wait
258 "Illegal rcu_softirq_qs() in RCU read-side critical section"); in rcu_softirq_qs()
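One plausible shape of the scenario the do_something() fragments above refer to, written out as a hedged sketch; do_something() and do_something_else() are placeholders, and the caller is assumed to meet rcu_softirq_qs()'s requirements (softirq context, no RCU read-side critical section held):

        local_bh_disable();
        do_something();         /* placeholder for the first chunk of work */
        rcu_softirq_qs();       /* point A: report quiescent states to RCU */
        do_something_else();    /* placeholder for the second chunk of work */
        local_bh_enable();      /* point B */

With the rcu_softirq_qs() in place, a synchronize_rcu() waiting on the call to do_something() is guaranteed to wait only until point A; without it, that same synchronize_rcu() would instead be guaranteed to wait until point B.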
268 * to the next non-quiescent value.
270 * The non-atomic test/increment sequence works because the upper bits
271 * of the ->state variable are manipulated only by the corresponding CPU,
291 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
307 * performed by the remote CPU after it exits idle. in rcu_watching_snap_stopped_since()
310 * performed by the remote CPU prior to entering idle and therefore can in rcu_watching_snap_stopped_since()
316 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_stopped_since()
331 return false; // Non-zero, so report failure; in rcu_watching_zero_in_eqs()
332 smp_rmb(); // Order *vp read and CT state re-read. in rcu_watching_zero_in_eqs()
345 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
347 * The caller must have disabled interrupts and must not be idle.
355 /* It is illegal to call this from idle state. */ in rcu_momentary_eqs()
362 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
364 * If the current CPU is idle and running at a first-level (not nested)
365 * interrupt, or directly from idle, return true.
376 * the idle task, instead of an actual IPI. in rcu_is_cpu_rrupt_from_idle()
392 * If we're not in an interrupt, we must be in the idle task! in rcu_is_cpu_rrupt_from_idle()
396 /* Does CPU appear to be idle from an RCU standpoint? */ in rcu_is_cpu_rrupt_from_idle()
411 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
430 * quiescent-state help from rcu_note_context_switch().
438 * Make sure that we give the grace-period kthread time to detect any
439 * idle CPUs before taking active measures to force quiescent states.
457 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); in adjust_jiffies_till_sched_qs()
467 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); in param_set_first_fqs_jiffies()
479 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); in param_set_next_fqs_jiffies()
514 * numbers mean idle. The value returned will thus be roughly double
532 * Send along grace-period-related data for rcutorture diagnostics.
544 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
561 * get re-enabled again.
567 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU)) in rcu_irq_work_resched()
570 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU)) in rcu_irq_work_resched()
583 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
601 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
612 * in a timely manner, the RCU grace-period kthread sets that CPU's
613 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
621 * interrupt or exception. In that case, the RCU grace-period kthread
637 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
638 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
639 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
648 // handler and that the rcu_node lock is an irq-disabled lock in __rcu_irq_enter_check_tick()
649 // prevents self-deadlock. So we can safely recheck under the lock. in __rcu_irq_enter_check_tick()
651 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
652 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
655 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
656 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
658 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
664 * Check to see if any future non-offloaded RCU-related work will need
667 * it is -not- an exported member of the RCU API. This is used by
668 * the idle-entry code to figure out whether it is safe to disable the
669 * scheduler-clock interrupt.
671 * Just check whether or not this CPU has non-offloaded RCU callbacks
676 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && in rcu_needs_cpu()
682 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
687 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
688 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
689 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
690 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
691 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
692 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
697 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
700 * A @true return means that this CPU can safely enter RCU read-side
705 * current CPU is deep within its idle loop, in kernel entry/exit code,
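A minimal sketch of the guard this comment implies, for code (instrumentation, for example) that can run in contexts where RCU might not be watching; the protected access is a placeholder:

        if (rcu_is_watching()) {
                rcu_read_lock();
                /* ... safely dereference RCU-protected data here ... */
                rcu_read_unlock();
        }
        /* otherwise skip the access: RCU is not watching this CPU right now */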
743 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
744 * After all, the CPU might be in deep idle state, and thus executing no
750 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
751 rnp->gp_seq)) in rcu_gpnum_ovf()
752 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
753 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
754 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
760 * is in dynticks idle mode, which is an extended quiescent state.
765 * Full ordering between remote CPU's post idle accesses and updater's in rcu_watching_snap_save()
772 * Ordering between remote CPU's pre idle accesses and post grace period in rcu_watching_snap_save()
775 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_save()
776 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) { in rcu_watching_snap_save()
777 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_save()
778 rcu_gpnum_ovf(rdp->mynode, rdp); in rcu_watching_snap_save()
786 * by virtue of being in or having passed through a dynticks idle state since
798 struct rcu_node *rnp = rdp->mynode; in rcu_watching_snap_recheck()
801 * If the CPU passed through or entered a dynticks idle phase with in rcu_watching_snap_recheck()
805 * read-side critical section that started before the beginning in rcu_watching_snap_recheck()
808 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) { in rcu_watching_snap_recheck()
809 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_recheck()
818 * the CPU-offline process, or, failing that, by rcu_gp_init() in rcu_watching_snap_recheck()
820 * last task on a leaf rcu_node structure exiting its RCU read-side in rcu_watching_snap_recheck()
825 * The rcu_node structure's ->lock is held here, which excludes in rcu_watching_snap_recheck()
826 * the relevant portions of the CPU-hotplug code, the grace-period in rcu_watching_snap_recheck()
835 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", in rcu_watching_snap_recheck()
836 __func__, rnp->grplo, rnp->grphi, rnp->level, in rcu_watching_snap_recheck()
837 (long)rnp->gp_seq, (long)rnp->completedqs); in rcu_watching_snap_recheck()
838 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in rcu_watching_snap_recheck()
839 …pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n… in rcu_watching_snap_recheck()
840 …__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rc… in rcu_watching_snap_recheck()
842 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], in rcu_watching_snap_recheck()
843 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state, in rcu_watching_snap_recheck()
844 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state); in rcu_watching_snap_recheck()
850 * delay RCU grace periods: (1) At age jiffies_to_sched_qs, in rcu_watching_snap_recheck()
853 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs in rcu_watching_snap_recheck()
860 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && in rcu_watching_snap_recheck()
864 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); in rcu_watching_snap_recheck()
866 smp_store_release(&rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
868 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
872 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! in rcu_watching_snap_recheck()
874 * And some in-kernel loops check need_resched() before calling in rcu_watching_snap_recheck()
876 * running in-kernel with scheduling-clock interrupts disabled. in rcu_watching_snap_recheck()
879 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_watching_snap_recheck()
880 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_watching_snap_recheck()
882 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
883 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
884 ret = -1; in rcu_watching_snap_recheck()
888 * If more than halfway to RCU CPU stall-warning time, invoke in rcu_watching_snap_recheck()
896 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_watching_snap_recheck()
897 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
898 ret = -1; in rcu_watching_snap_recheck()
901 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_watching_snap_recheck()
902 (rnp->ffmask & rdp->grpmask)) { in rcu_watching_snap_recheck()
903 rdp->rcu_iw_pending = true; in rcu_watching_snap_recheck()
904 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_watching_snap_recheck()
905 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_watching_snap_recheck()
908 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { in rcu_watching_snap_recheck()
909 int cpu = rdp->cpu; in rcu_watching_snap_recheck()
915 rsrp = &rdp->snap_record; in rcu_watching_snap_recheck()
916 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu); in rcu_watching_snap_recheck()
917 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu); in rcu_watching_snap_recheck()
918 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); in rcu_watching_snap_recheck()
919 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu); in rcu_watching_snap_recheck()
920 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu); in rcu_watching_snap_recheck()
921 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu); in rcu_watching_snap_recheck()
922 rsrp->jiffies = jiffies; in rcu_watching_snap_recheck()
923 rsrp->gp_seq = rdp->gp_seq; in rcu_watching_snap_recheck()
930 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
934 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in trace_rcu_this_gp()
935 gp_seq_req, rnp->level, in trace_rcu_this_gp()
936 rnp->grplo, rnp->grphi, s); in trace_rcu_this_gp()
940 * rcu_start_this_gp - Request the start of a particular grace period
947 * rcu_node structure's ->gp_seq_needed field. Returns true if there
948 * is reason to awaken the grace-period kthread.
950 * The caller must hold the specified rcu_node structure's ->lock, which
951 * is why the caller is responsible for waking the grace-period kthread.
964 * has already been recorded -- or if that grace period has in in rcu_start_this_gp()
966 * progress in a non-leaf node, no recording is needed because the in rcu_start_this_gp()
968 * Note that rnp_start->lock must not be released. in rcu_start_this_gp()
972 for (rnp = rnp_start; 1; rnp = rnp->parent) { in rcu_start_this_gp()
975 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || in rcu_start_this_gp()
976 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || in rcu_start_this_gp()
978 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { in rcu_start_this_gp()
983 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); in rcu_start_this_gp()
984 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { in rcu_start_this_gp()
995 if (rnp != rnp_start && rnp->parent != NULL) in rcu_start_this_gp()
997 if (!rnp->parent) in rcu_start_this_gp()
1017 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { in rcu_start_this_gp()
1018 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1019 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1027 * Clean up any old requests for the just-ended grace period. Also return
1035 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); in rcu_future_gp_cleanup()
1037 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ in rcu_future_gp_cleanup()
1038 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1056 * is dangerous that late in the CPU-down hotplug process. The in swake_up_one_online()
1076 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1078 * sleep upon return, resulting in a grace-period hang), and don't bother
1079 * awakening when there is nothing for the grace-period kthread to do
1084 * So why do the self-wakeup when in an interrupt or softirq handler
1085 * in the grace-period kthread's context? Because the kthread might have
1087 * pre-sleep check of the awaken condition. In this case, a wakeup really
1103 * If there is room, assign a ->gp_seq number to any callbacks on this
1105 * that were previously assigned a ->gp_seq number that has since proven
1107 * ->gp_seq number while RCU is idle, but with reference to a non-root
1110 * the RCU grace-period kthread.
1112 * The caller must hold rnp->lock with interrupts disabled.
1123 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1126 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1129 * Callbacks are often registered with incomplete grace-period in rcu_accelerate_cbs()
1135 * accelerating callback invocation to an earlier grace-period in rcu_accelerate_cbs()
1139 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1143 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1148 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1155 * rcu_node structure's ->lock be held. It consults the cached value
1156 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1157 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1158 * while holding the leaf rcu_node structure's ->lock.
1168 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1170 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1183 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1185 * invoke it repeatedly. As long as it is not invoked -too- often...
1186 * Returns true if the RCU grace-period kthread needs to be awakened.
1188 * The caller must hold rnp->lock with interrupts disabled.
1196 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1200 * Find all callbacks whose ->gp_seq numbers indicate that they in rcu_advance_cbs()
1203 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1211 * that the RCU grace-period kthread be awakened.
1217 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) in rcu_advance_cbs_nowake()
1220 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) in rcu_advance_cbs_nowake()
1239 * Update CPU-local rcu_data state to record the beginnings and ends of
1240 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1242 * Returns true if the grace-period kthread needs to be awakened.
1252 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1256 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1257 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1260 rdp->core_needs_qs = false; in __note_gp_changes()
1261 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1265 if (rdp->core_needs_qs) in __note_gp_changes()
1266 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1269 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ in __note_gp_changes()
1270 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1271 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1277 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1278 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1279 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1280 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1283 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1284 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1285 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1286 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap)) in __note_gp_changes()
1287 WRITE_ONCE(rdp->last_sched_clock, jiffies); in __note_gp_changes()
1288 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1300 rnp = rdp->mynode; in note_gp_changes()
1301 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1302 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1316 /* Register a counter to suppress debugging grace-period delays. */
1341 static void rcu_gp_slow(int delay) in rcu_gp_slow() argument
1343 if (!rcu_gp_slow_is_suppressed() && delay > 0 && in rcu_gp_slow()
1344 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) in rcu_gp_slow()
1345 schedule_timeout_idle(delay); in rcu_gp_slow()
1350 /* Allow rcutorture to stall the grace-period kthread. */
1358 /* Actually implement the aforementioned wait. */
1369 pr_alert("%s: Wait complete\n", __func__); in rcu_gp_torture_wait()
1390 // If RCU was idle, note beginning of GP. in rcu_poll_gp_seq_start()
1407 // end of that GP. Either way, zero counter to avoid counter-wrap in rcu_poll_gp_seq_end()
1457 * wait tail: Tracks the set of nodes which need to
1458 * wait for the current GP to complete.
1464 * At every grace period init, a new wait node is added
1465 * to the llist. This wait node is used as wait tail
1467 * number of wait nodes, if all wait nodes are in use
1473 * in the llist should be used as a wait-tail for this
1474 * grace period, therefore users which should wait due
1478 * Below is an illustration of how the done and wait
1486 * +----------+ +--------+ +-------+
1488 * | head |---------> | cb2 |--------->| cb1 |
1490 * +----------+ +--------+ +-------+
1496 * WAIT TAIL
1500 * +----------+ +--------+ +--------+ +-------+
1502 * | head ------> wait |------> cb2 |------> | cb1 |
1504 * +----------+ +--------+ +--------+ +-------+
1516 * +----------+ +--------+ +--------+ +-------+
1518 * | head ------> wait |------> cb2 |------> | cb1 |
1520 * +----------+ +--------+ +--------+ +-------+
1526 * WAIT TAIL DONE TAIL
1530 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1532 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1534 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1545 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1547 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1549 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1556 * to use the rel-acq semantics. If the concurrent kworker
1571 * +----------+ +--------+
1573 * | head ------> wait |
1575 * +----------+ +--------+
1581 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node; in rcu_sr_is_wait_head()
1592 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1)) in rcu_sr_get_wait_head()
1593 return &sr_wn->node; in rcu_sr_get_wait_head()
1603 atomic_set_release(&sr_wn->inuse, 0); in rcu_sr_put_wait_head()
1615 unsigned long oldstate = (unsigned long) rs->head.func; in rcu_sr_normal_complete()
1623 complete(&rs->completion); in rcu_sr_normal_complete()
1635 * follow acq-rel semantics. in rcu_sr_normal_gp_cleanup_work()
1646 head = done->next; in rcu_sr_normal_gp_cleanup_work()
1647 done->next = NULL; in rcu_sr_normal_gp_cleanup_work()
1651 * done tail which is acq-read above is not removed in rcu_sr_normal_gp_cleanup_work()
1690 llist_for_each_safe(rcu, next, wait_tail->next) { in rcu_sr_normal_gp_cleanup()
1696 wait_tail->next = next; in rcu_sr_normal_gp_cleanup()
1704 * wait head if there are no in-flight workers. If there are in-flight workers, in rcu_sr_normal_gp_cleanup()
1705 * they will remove the last wait head. in rcu_sr_normal_gp_cleanup()
1709 if (wait_tail->next && wait_tail->next->next == NULL && in rcu_sr_normal_gp_cleanup()
1710 rcu_sr_is_wait_head(wait_tail->next) && in rcu_sr_normal_gp_cleanup()
1712 rcu_sr_put_wait_head(wait_tail->next); in rcu_sr_normal_gp_cleanup()
1713 wait_tail->next = NULL; in rcu_sr_normal_gp_cleanup()
1722 * of outstanding users (if still left) and releasing wait-heads in rcu_sr_normal_gp_cleanup()
1725 if (wait_tail->next) { in rcu_sr_normal_gp_cleanup()
1752 /* Inject a wait-dummy-node. */ in rcu_sr_normal_gp_init()
1757 * this step, since a GP-kthread, rcu_gp_init() -> gp_cleanup(), in rcu_sr_normal_gp_init()
1769 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next); in rcu_sr_normal_add_req()
1816 * separator to the llist, because there were no dummy-nodes left. in rcu_gp_init()
1818 * The number of dummy-nodes is fixed, so it could be that we have run out of in rcu_gp_init()
1826 * Apply per-leaf buffered online and offline operations to in rcu_gp_init()
1828 * wait for subsequent online CPUs, and that RCU hooks in the CPU in rcu_gp_init()
1840 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1841 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1849 /* Record old state, apply changes to ->qsmaskinit field. */ in rcu_gp_init()
1850 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1851 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1853 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ in rcu_gp_init()
1854 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1856 if (!rnp->wait_blkd_tasks) /* Ever offline? */ in rcu_gp_init()
1859 rnp->wait_blkd_tasks = true; /* blocked tasks */ in rcu_gp_init()
1866 * If all waited-on tasks from prior grace period are in rcu_gp_init()
1869 * clear ->wait_blkd_tasks. Otherwise, if one of this in rcu_gp_init()
1871 * simply clear ->wait_blkd_tasks. in rcu_gp_init()
1873 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1874 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { in rcu_gp_init()
1875 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1876 if (!rnp->qsmaskinit) in rcu_gp_init()
1887 * Set the quiescent-state-needed bits in all the rcu_node in rcu_gp_init()
1888 * structures for all currently online CPUs in breadth-first in rcu_gp_init()
1904 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1905 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); in rcu_gp_init()
1906 if (rnp == rdp->mynode) in rcu_gp_init()
1909 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, in rcu_gp_init()
1910 rnp->level, rnp->grplo, in rcu_gp_init()
1911 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1912 /* Quiescent states for tasks on any now-offline CPUs. */ in rcu_gp_init()
1913 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
1914 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
1915 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
1916 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_gp_init()
1931 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1942 // Someone like call_rcu() requested a force-quiescent-state scan. in rcu_gp_fqs_check_wake()
1948 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
1955 * Do one round of quiescent-state forcing.
1972 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs); in rcu_gp_fqs()
1976 /* Collect dyntick-idle snapshots. */ in rcu_gp_fqs()
1979 /* Handle dyntick-idle and offline CPUs. */ in rcu_gp_fqs()
1982 /* Clear flag to prevent immediate re-entry. */ in rcu_gp_fqs()
1991 * Loop doing repeated quiescent-state forcing until the grace period ends.
2032 * is required only for single-node rcu_node trees because readers blocking in rcu_gp_fqs_loop()
2034 * For multi-node trees, checking the root node's ->qsmask suffices, because a in rcu_gp_fqs_loop()
2035 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from in rcu_gp_fqs_loop()
2038 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_fqs_loop()
2041 /* If time for quiescent-state forcing, do it. */ in rcu_gp_fqs_loop()
2056 ret = 0; /* Force full wait till next FQS. */ in rcu_gp_fqs_loop()
2070 j = rcu_state.jiffies_force_qs - j; in rcu_gp_fqs_loop()
2093 gp_duration = rcu_state.gp_end - rcu_state.gp_start; in rcu_gp_cleanup()
2109 * Propagate new ->gp_seq value to rcu_node structures so that in rcu_gp_cleanup()
2110 * other CPUs don't have to wait until the start of the next grace in rcu_gp_cleanup()
2112 * RCU grace-period initialization races by forcing the end of in rcu_gp_cleanup()
2123 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
2124 WRITE_ONCE(rnp->gp_seq, new_gp_seq); in rcu_gp_cleanup()
2125 if (!rnp->parent) in rcu_gp_cleanup()
2128 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2130 /* smp_mb() provided by prior unlock-lock pair. */ in rcu_gp_cleanup()
2134 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { in rcu_gp_cleanup()
2146 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ in rcu_gp_cleanup()
2155 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { in rcu_gp_cleanup()
2156 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2166 // the RCU_GP_FLAG_INIT bit in ->gp_state (which records in rcu_gp_cleanup()
2170 // hold the ->nocb_lock needed to safely access an offloaded in rcu_gp_cleanup()
2171 // ->cblist. We do not want to acquire that lock because in rcu_gp_cleanup()
2181 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. in rcu_gp_cleanup()
2183 // ->gp_flags bits. in rcu_gp_cleanup()
2205 /* Handle grace-period start. */ in rcu_gp_kthread()
2225 /* Handle quiescent-state forcing. */ in rcu_gp_kthread()
2228 /* Handle grace-period end. */ in rcu_gp_kthread()
2237 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2238 * another grace period is required. Whether we wake the grace-period
2239 * kthread or it awakens itself for the next round of quiescent-state
2240 * forcing, that kthread will clean up after the just-completed grace
2241 * period. Note that the caller must hold rnp->lock, which is released
2245 __releases(rcu_get_root()->lock) in rcu_report_qs_rsp()
2260 * is the grace-period snapshot, which means that the quiescent states
2261 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2264 * As a special case, if mask is zero, the bit-already-cleared check is
2266 * during grace-period initialization.
2270 __releases(rnp->lock) in rcu_report_qs_rnp()
2279 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
2291 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); in rcu_report_qs_rnp()
2292 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, in rcu_report_qs_rnp()
2293 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2294 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2295 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2296 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2302 rnp->completedqs = rnp->gp_seq; in rcu_report_qs_rnp()
2303 mask = rnp->grpmask; in rcu_report_qs_rnp()
2304 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2312 rnp = rnp->parent; in rcu_report_qs_rnp()
2314 oldmask = READ_ONCE(rnp_c->qsmask); in rcu_report_qs_rnp()
2322 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ in rcu_report_qs_rnp()
2328 * RCU grace period. The caller must hold the corresponding rnp->lock with
2334 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2343 rnp->qsmask != 0) { in rcu_report_unblock_qs_rnp()
2348 rnp->completedqs = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2349 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2359 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ in rcu_report_unblock_qs_rnp()
2360 gps = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2361 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2378 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2379 rnp = rdp->mynode; in rcu_report_qs_rdp()
2381 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2382 rdp->gpwrap) { in rcu_report_qs_rdp()
2390 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2394 mask = rdp->grpmask; in rcu_report_qs_rdp()
2395 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2396 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2415 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_qs_rdp()
2416 /* ^^^ Released rnp->lock */ in rcu_report_qs_rdp()
2429 /* Check for grace-period ends and beginnings. */ in rcu_check_quiescent_state()
2436 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2441 * period? If no, then exit and wait for the next call. in rcu_check_quiescent_state()
2443 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2453 /* Return true if callback-invocation time limit exceeded. */
2467 * period. Throttle as specified by rdp->blimit.
2484 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2486 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2488 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2500 * completion (materialized by rnp->gp_seq update) thanks to the in rcu_do_batch()
2507 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); in rcu_do_batch()
2509 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; in rcu_do_batch()
2510 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2511 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && in rcu_do_batch()
2522 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2523 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2525 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2527 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
2543 f = rhp->func; in rcu_do_batch()
2545 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); in rcu_do_batch()
2570 // But rcuc kthreads can delay quiescent-state in rcu_do_batch()
2572 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && in rcu_do_batch()
2574 rdp->rcu_cpu_has_work = 1; in rcu_do_batch()
2581 rdp->n_cbs_invoked += count; in rcu_do_batch()
2586 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2587 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2590 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2591 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2592 rdp->blimit = blimit; in rcu_do_batch()
2594 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ in rcu_do_batch()
2595 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2596 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2597 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2598 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2599 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2605 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2609 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2610 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2618 * This function is invoked from each scheduling-clock interrupt,
2619 * and checks to see if this CPU is in a non-context-switch quiescent
2620 * state, for example, user mode or idle loop. It also schedules RCU
2634 trace_rcu_utilization(TPS("Start scheduler-tick")); in rcu_sched_clock_irq()
2637 /* The load-acquire pairs with the store-release setting to true. */ in rcu_sched_clock_irq()
2639 /* Idle and userspace execution already are quiescent states. */ in rcu_sched_clock_irq()
2653 trace_rcu_utilization(TPS("End scheduler-tick")); in rcu_sched_clock_irq()
2677 rcu_state.cbovldnext |= !!rnp->cbovldmask; in force_qs_rnp()
2678 if (rnp->qsmask == 0) { in force_qs_rnp()
2683 * priority-boost blocked readers. in force_qs_rnp()
2686 /* rcu_initiate_boost() releases rnp->lock */ in force_qs_rnp()
2692 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { in force_qs_rnp()
2699 mask |= rdp->grpmask; in force_qs_rnp()
2703 rsmask |= rdp->grpmask; in force_qs_rnp()
2706 /* Idle/offline CPUs, report (releases rnp->lock). */ in force_qs_rnp()
2707 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in force_qs_rnp()
2720 * CPUs are in dyntick-idle mode.
2733 for (; rnp != NULL; rnp = rnp->parent) { in rcu_force_quiescent_state()
2735 !raw_spin_trylock(&rnp->fqslock); in rcu_force_quiescent_state()
2737 raw_spin_unlock(&rnp_old->fqslock); in rcu_force_quiescent_state()
2746 raw_spin_unlock(&rnp_old->fqslock); in rcu_force_quiescent_state()
2770 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2775 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2790 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) { in rcu_core()
2792 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2800 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2803 /* Re-invoke RCU core processing if there are callbacks remaining. */ in rcu_core()
2804 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_core()
2814 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2826 * is invoked from idle in rcu_wake_cond()
2869 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2916 * Spawn per-CPU RCU core processing kthreads.
2933 rcu_segcblist_enqueue(&rdp->cblist, head); in rcutree_enqueue()
2937 rcu_segcblist_n_cbs(&rdp->cblist)); in rcutree_enqueue()
2940 rcu_segcblist_n_cbs(&rdp->cblist)); in rcutree_enqueue()
2941 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in rcutree_enqueue()
2945 * Handle any core-RCU processing required by a call_rcu() invocation.
2953 * core in order to force a re-evaluation of RCU's idleness. in call_rcu_core()
2969 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in call_rcu_core()
2970 rdp->qlen_last_fqs_check + qhimark)) { in call_rcu_core()
2977 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in call_rcu_core()
2980 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in call_rcu_core()
2981 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in call_rcu_core()
2982 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in call_rcu_core()
2984 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in call_rcu_core()
2985 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in call_rcu_core()
2999 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3001 * structure's ->lock.
3008 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
3009 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
3011 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
3016 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3022 * grace periods. This omission is due to the need for no-CBs CPUs to
3023 * be holding ->nocb_lock to do this check, which is too heavy for a
3024 * common-case operation.
3028 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
3031 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
3032 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
3048 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); in __call_rcu_common()
3057 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); in __call_rcu_common()
3060 WRITE_ONCE(head->func, rcu_leak_callback); in __call_rcu_common()
3063 head->func = func; in __call_rcu_common()
3064 head->next = NULL; in __call_rcu_common()
3074 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu_common()
3080 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu_common()
3081 rcu_segcblist_init(&rdp->cblist); in __call_rcu_common()
3098 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
3099 * flush all lazy callbacks (including the new one) to the main ->cblist while
3106 * period elapses, in other words after all pre-existing RCU read-side
3111 * memory pressure and on systems which are lightly loaded or mostly idle.
3127 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3129 * ->cblist to prevent starting of grace periods too soon.
3136 * period elapses, in other words after all pre-existing RCU read-side
3138 * might well execute concurrently with RCU read-side critical sections
3141 * RCU read-side critical sections are delimited by rcu_read_lock()
3144 * or softirqs have been disabled also serve as RCU read-side critical
3149 * all pre-existing RCU read-side critical sections. On systems with more
3152 * last RCU read-side critical section whose beginning preceded the call
3153 * to call_rcu(). It also means that each CPU executing an RCU read-side
3156 * of that RCU read-side critical section. Note that these guarantees
3157 * include CPUs that are offline, idle, or executing in user mode, as
3163 * between the call to call_rcu() and the invocation of "func()" -- even
3167 * Implementation of these memory-ordering guarantees is described here:
3168 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
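For reference, a hedged sketch of the classic deferred-free pattern that the call_rcu() guarantees above describe; struct foo, foo_lock, foo_reclaim(), and fp are hypothetical names, not anything defined in this file:

        #include <linux/list.h>
        #include <linux/rcupdate.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct foo {                            /* hypothetical RCU-protected element */
                struct list_head list;
                int data;
                struct rcu_head rcu;
        };

        static void foo_reclaim(struct rcu_head *rhp)
        {
                kfree(container_of(rhp, struct foo, rcu));
        }

        /* Updater: unlink under the update-side lock, then defer the free. */
        spin_lock(&foo_lock);                   /* hypothetical update-side lock */
        list_del_rcu(&fp->list);                /* fp assumed located earlier */
        spin_unlock(&foo_lock);
        call_rcu(&fp->rcu, foo_reclaim);        /* foo_reclaim() runs after a grace period */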
3177 * During early boot, any blocking grace-period wait automatically
3184 * grace-period optimization is ignored once the scheduler is running.
3224 /* Now we can wait. */ in synchronize_rcu_normal()
3233 * synchronize_rcu - wait until a grace period has elapsed.
3237 * read-side critical sections have completed. Note, however, that
3239 * concurrently with new RCU read-side critical sections that began while
3242 * RCU read-side critical sections are delimited by rcu_read_lock()
3245 * or softirqs have been disabled also serve as RCU read-side critical
3249 * Note that this guarantee implies further memory-ordering guarantees.
3252 * the end of its last RCU read-side critical section whose beginning
3254 * an RCU read-side critical section that extends beyond the return from
3257 * that RCU read-side critical section. Note that these guarantees include
3258 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3264 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3267 * Implementation of these memory-ordering guarantees is described here:
3268 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3278 "Illegal synchronize_rcu() in RCU read-side critical section"); in synchronize_rcu()
3292 // reuse of ->gp_seq_polled_snap. in synchronize_rcu()
3296 // Update the normal grace-period counters to record in synchronize_rcu()
3303 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) in synchronize_rcu()
3304 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in synchronize_rcu()
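As a hedged counterpart to the call_rcu() sketch earlier, the blocking form of the same update-side pattern; fp, foo_lock, and the list membership are again hypothetical:

        /* Updater: unpublish, wait out all pre-existing readers, then free. */
        spin_lock(&foo_lock);
        list_del_rcu(&fp->list);
        spin_unlock(&foo_lock);
        synchronize_rcu();      /* every reader that could still see fp has finished */
        kfree(fp);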
3310 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3319 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED; in get_completed_synchronize_rcu_full()
3320 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED; in get_completed_synchronize_rcu_full()
3325 * get_state_synchronize_rcu - Snapshot current RCU state
3334 * Any prior manipulation of RCU-protected data must happen in get_state_synchronize_rcu()
3335 * before the load from ->gp_seq. in get_state_synchronize_rcu()
3343 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3344 * @rgosp: location to place combined normal/expedited grace-period state
3346 * Places the normal and expedited grace-period states in @rgosp. This
3363 * Any prior manipulation of RCU-protected data must happen in get_state_synchronize_rcu_full()
3364 * before the loads from ->gp_seq and ->expedited_sequence. in get_state_synchronize_rcu_full()
3367 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq); in get_state_synchronize_rcu_full()
3368 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); in get_state_synchronize_rcu_full()
3385 rnp = rdp->mynode; in start_poll_synchronize_rcu_common()
3392 // from which they are updated at grace-period start, as required. in start_poll_synchronize_rcu_common()
3400 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3418 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3421 * Places the normal and expedited grace-period states in *@rgosp. This
3437 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3444 * can explicitly wait for a grace period, for example, by passing @oldstate
3451 * more than a billion grace periods (and way more on a 64-bit system!).
3453 * (many hours even on 32-bit systems) should check them occasionally and
3456 * to get a guaranteed-completed grace-period state.
3458 * In addition, because oldstate compresses the grace-period state for
3464 * This function provides the same memory-ordering guarantees that
3481 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3488 * can explicitly wait for a grace period, for example, by passing @rgosp
3493 * for more than a billion grace periods (and way more on a 64-bit
3495 * long time periods (many hours even on 32-bit systems) should check
3498 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3499 * grace-period state.
3501 * This function provides the same memory-ordering guarantees that would
3505 * ->gp_seq field be checked instead of that of the rcu_state structure.
3506 * The problem is that the just-ending grace-period's callbacks can be
3507 * invoked between the time that the root rcu_node structure's ->gp_seq
3508 * field is updated and the time that the rcu_state structure's ->gp_seq
3517 smp_mb(); // Order against root rcu_node structure grace-period cleanup. in poll_state_synchronize_rcu_full()
3518 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED || in poll_state_synchronize_rcu_full()
3519 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) || in poll_state_synchronize_rcu_full()
3520 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED || in poll_state_synchronize_rcu_full()
3521 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { in poll_state_synchronize_rcu_full()
3530 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3535 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3539 * more than 2 billion grace periods (and way more on a 64-bit system!),
3542 * This function provides the same memory-ordering guarantees that
3555 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3561 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3566 * more than 2 billion grace periods (and way more on a 64-bit system!),
3569 * This function provides the same memory-ordering guarantees that
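A hedged sketch tying the polled grace-period interfaces above together; the intervening work is a placeholder:

        unsigned long cookie;

        cookie = get_state_synchronize_rcu();   /* snapshot the current grace-period state */
        /* ... unrelated work, during which a grace period may or may not elapse ... */
        if (!poll_state_synchronize_rcu(cookie))
                synchronize_rcu();              /* not yet elapsed: block for a full grace period */

Per the documentation excerpted above, cond_synchronize_rcu(cookie) bundles that final check-and-wait step, and start_poll_synchronize_rcu() can replace get_state_synchronize_rcu() when the caller also wants to ensure that a grace period actually gets started.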
3582 * Check to see if there is any immediate RCU-related work to be done by
3585 * CPU-local state are performed first. However, we must check for CPU
3592 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3603 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ in rcu_pending()
3613 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3618 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3621 /* Has RCU gone idle with this CPU needing another grace period? */ in rcu_pending()
3622 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3624 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3628 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3629 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3660 rhp->next = rhp; // Mark the callback as having been invoked. in rcu_barrier_callback()
3662 rcu_barrier_trace(TPS("LastCB"), -1, s); in rcu_barrier_callback()
3665 rcu_barrier_trace(TPS("CB"), -1, s); in rcu_barrier_callback()
3670 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3675 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); in rcu_barrier_entrain()
3682 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3683 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_entrain()
3684 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_entrain()
3688 * queue. This way we don't wait for the bypass timer, which can reach seconds in rcu_barrier_entrain()
3691 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3693 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3694 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_entrain()
3697 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_entrain()
3698 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3703 smp_store_release(&rdp->barrier_seq_snap, gseq); in rcu_barrier_entrain()
3707 * Called with preemption disabled, and from cross-cpu IRQ context.
3715 WARN_ON_ONCE(cpu != rdp->cpu); in rcu_barrier_handler()
3723 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3725 * Note that this primitive does not necessarily wait for an RCU grace period
3738 rcu_barrier_trace(TPS("Begin"), -1, s); in rcu_barrier()
3745 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3755 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3759 * to avoid a too-soon return to zero in case of an immediate in rcu_barrier()
3760 * invocation of the just-enqueued callback (or preemption of in rcu_barrier()
3761 * this task). Exclude CPU-hotplug operations to ensure that no in rcu_barrier()
3762 * offline non-offloaded CPU has callbacks queued. in rcu_barrier()
3776 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) in rcu_barrier()
3779 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { in rcu_barrier()
3780 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
3787 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
3797 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
3808 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ in rcu_barrier()
3812 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3818 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
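A hedged sketch of the canonical rcu_barrier() use case implied above: module-unload code must drain its already-queued call_rcu() callbacks before tearing down the resources those callbacks touch. All identifiers here (foo_exit(), foo_unregister(), foo_cachep) are hypothetical:

        static void __exit foo_exit(void)
        {
                foo_unregister();               /* stop posting new call_rcu() callbacks */
                rcu_barrier();                  /* wait for already-queued callbacks to be invoked */
                kmem_cache_destroy(foo_cachep); /* now safe: no callback can still touch the cache */
        }
        module_exit(foo_exit);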
3829 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3834 * rcu_barrier() system-wide from use of this function, which means that
3835 * callers might needlessly wait a second or three.
3877 return -EAGAIN; in param_set_do_rcu_barrier()
3880 atomic_inc((atomic_t *)kp->arg); in param_set_do_rcu_barrier()
3882 atomic_dec((atomic_t *)kp->arg); in param_set_do_rcu_barrier()
3892 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg)); in param_get_do_rcu_barrier()
3904 * This will not be stable unless the rcu_node structure's ->lock is
3910 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
3916 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3920 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); in rcu_rdp_cpu_online()
3956 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask in rcu_lockdep_current_cpu_online()
3979 * and all tasks that were preempted within an RCU read-side critical
3981 * read-side critical section. Some other CPU is reporting this fact with
3982 * the specified rcu_node structure's ->lock held and interrupts disabled.
3984 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
3985 * the leaf rcu_node structure's ->qsmaskinit field has already been
4001 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || in rcu_cleanup_dead_rnp()
4005 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
4006 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
4010 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
4012 WARN_ON_ONCE(rnp->qsmask); in rcu_cleanup_dead_rnp()
4013 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
4023 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4025 * must hold the corresponding leaf rcu_node ->lock with interrupts
4035 WARN_ON_ONCE(rnp->wait_blkd_tasks); in rcu_init_new_rnp()
4037 mask = rnp->grpmask; in rcu_init_new_rnp()
4038 rnp = rnp->parent; in rcu_init_new_rnp()
4042 oldmask = rnp->qsmaskinit; in rcu_init_new_rnp()
4043 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
4051 * Do boot-time initialization of a CPU's per-CPU RCU data.
4060 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4061 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4062 WARN_ON_ONCE(ct->nesting != 1); in rcu_boot_init_percpu_data()
4064 rdp->barrier_seq_snap = rcu_state.barrier_sequence; in rcu_boot_init_percpu_data()
4065 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4066 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4067 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4068 rdp->rcu_onl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4069 rdp->last_sched_clock = jiffies; in rcu_boot_init_percpu_data()
4070 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4097 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_exp_par_gp_kworker()
4099 if (rnp->exp_kworker) in rcu_spawn_exp_par_gp_kworker()
4105 rnp->grplo, rnp->grphi); in rcu_spawn_exp_par_gp_kworker()
4108 WRITE_ONCE(rnp->exp_kworker, kworker); in rcu_spawn_exp_par_gp_kworker()
4111 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); in rcu_spawn_exp_par_gp_kworker()
4113 rcu_thread_affine_rnp(kworker->task, rnp); in rcu_spawn_exp_par_gp_kworker()
4114 wake_up_process(kworker->task); in rcu_spawn_exp_par_gp_kworker()
4130 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); in rcu_start_exp_gp_kworker()
4136 mutex_lock(&rnp->kthread_mutex); in rcu_spawn_rnp_kthreads()
4139 mutex_unlock(&rnp->kthread_mutex); in rcu_spawn_rnp_kthreads()
4144 * Invoked early in the CPU-online process, when pretty much all services
4147 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4149 * accept some slop in the rsp->gp_seq access due to the fact that this
4150 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4162 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4163 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4164 rdp->blimit = blimit; in rcutree_prepare_cpu()
4165 ct->nesting = 1; /* CPU not up, no tearing. */ in rcutree_prepare_cpu()
4169 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be in rcutree_prepare_cpu()
4170 * (re-)initialized. in rcutree_prepare_cpu()
4172 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4173 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4176 * Add CPU to leaf rcu_node pending-online bitmask. Any needed in rcutree_prepare_cpu()
4180 rnp = rdp->mynode; in rcutree_prepare_cpu()
4182 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4183 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4184 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4185 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4186 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4187 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
4188 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
4189 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4206 return smp_load_acquire(&rdp->beenonline); in rcu_cpu_beenfullyonline()
4210 * Near the end of the CPU-online process. Pretty much all services
4220 rnp = rdp->mynode; in rcutree_online_cpu()
4222 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4228 // Stop-machine done, so allow nohz_full to disable tick. in rcutree_online_cpu()
4235 * (both expedited and normal) will wait on it. Note that this means that
4236 * incoming CPUs are not allowed to use RCU read-side critical sections
4256 if (rdp->cpu_started) in rcutree_report_cpu_starting()
4258 rdp->cpu_started = true; in rcutree_report_cpu_starting()
4260 rnp = rdp->mynode; in rcutree_report_cpu_starting()
4261 mask = rdp->grpmask; in rcutree_report_cpu_starting()
4266 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); in rcutree_report_cpu_starting()
4268 newcpu = !(rnp->expmaskinitnext & mask); in rcutree_report_cpu_starting()
4269 rnp->expmaskinitnext |= mask; in rcutree_report_cpu_starting()
4273 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcutree_report_cpu_starting()
4274 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_starting()
4275 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_starting()
4278 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ in rcutree_report_cpu_starting()
4284 /* Report QS -after- changing ->qsmaskinitnext! */ in rcutree_report_cpu_starting()
4285 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcutree_report_cpu_starting()
4290 smp_store_release(&rdp->beenonline, true); in rcutree_report_cpu_starting()
4291 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ in rcutree_report_cpu_starting()
4296 * the rcu_node tree's ->qsmaskinitnext bit masks.
4309 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_report_cpu_dead()
4313 * may introduce a new READ-side while it is actually off the QS masks. in rcutree_report_cpu_dead()
4322 mask = rdp->grpmask; in rcutree_report_cpu_dead()
4324 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ in rcutree_report_cpu_dead()
4325 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_dead()
4326 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_dead()
4327 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ in rcutree_report_cpu_dead()
4328 /* Report quiescent state -before- changing ->qsmaskinitnext! */ in rcutree_report_cpu_dead()
4330 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcutree_report_cpu_dead()
4333 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); in rcutree_report_cpu_dead()
4336 rdp->cpu_started = false; in rcutree_report_cpu_dead()
4341 * The outgoing CPU has just passed through the dying-idle state, and we
4357 if (rcu_segcblist_empty(&rdp->cblist)) { in rcutree_migrate_callbacks()
4365 my_rnp = my_rdp->mynode; in rcutree_migrate_callbacks()
4372 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4375 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4376 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); in rcutree_migrate_callbacks()
4389 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4390 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4392 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4393 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()
4405 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); in rcutree_dead_cpu()
4406 // Stop-machine done, so allow nohz_full to disable tick. in rcutree_dead_cpu()
4419 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
4421 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); in rcutree_dying_cpu()
4422 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in rcutree_dying_cpu()
4423 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl")); in rcutree_dying_cpu()
4438 rnp = rdp->mynode; in rcutree_offline_cpu()
4440 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4443 // nohz_full CPUs need the tick for stop-machine to work quickly in rcutree_offline_cpu()
4450 * On non-huge systems, use expedited RCU grace periods to make suspend
4486 …if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n… in rcu_spawn_gp_kthread()
4500 /* This is a pre-SMP initcall, we expect a single CPU */ in rcu_spawn_gp_kthread()
4503 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu() in rcu_spawn_gp_kthread()
4507 rcu_spawn_rnp_kthreads(rdp->mynode); in rcu_spawn_gp_kthread()
4517 * initialization process. Before this is called, the idle task might
4518 * contain synchronous grace-period primitives (during which time, this idle
4519 * task is booting the system, and such primitives are no-ops). After this
4520 * function is called, any synchronous grace-period primitives are run as
4534 // Fix up the ->gp_seq counters. in rcu_scheduler_starting()
4537 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in rcu_scheduler_starting()
4567 /* Initialize the level-tracking arrays. */ in rcu_init_one()
4571 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; in rcu_init_one()
4576 for (i = rcu_num_lvls - 1; i >= 0; i--) { in rcu_init_one()
4583 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
4584 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
4586 rnp->gp_seq = rcu_state.gp_seq; in rcu_init_one()
4587 rnp->gp_seq_needed = rcu_state.gp_seq; in rcu_init_one()
4588 rnp->completedqs = rcu_state.gp_seq; in rcu_init_one()
4589 rnp->qsmask = 0; in rcu_init_one()
4590 rnp->qsmaskinit = 0; in rcu_init_one()
4591 rnp->grplo = j * cpustride; in rcu_init_one()
4592 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
4593 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4594 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4596 rnp->grpnum = 0; in rcu_init_one()
4597 rnp->grpmask = 0; in rcu_init_one()
4598 rnp->parent = NULL; in rcu_init_one()
4600 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
4601 rnp->grpmask = BIT(rnp->grpnum); in rcu_init_one()
4602 rnp->parent = rcu_state.level[i - 1] + in rcu_init_one()
4603 j / levelspread[i - 1]; in rcu_init_one()
4605 rnp->level = i; in rcu_init_one()
4606 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4608 init_waitqueue_head(&rnp->exp_wq[0]); in rcu_init_one()
4609 init_waitqueue_head(&rnp->exp_wq[1]); in rcu_init_one()
4610 init_waitqueue_head(&rnp->exp_wq[2]); in rcu_init_one()
4611 init_waitqueue_head(&rnp->exp_wq[3]); in rcu_init_one()
4612 spin_lock_init(&rnp->exp_lock); in rcu_init_one()
4613 mutex_init(&rnp->kthread_mutex); in rcu_init_one()
4614 raw_spin_lock_init(&rnp->exp_poll_lock); in rcu_init_one()
4615 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; in rcu_init_one()
4616 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp); in rcu_init_one()
4624 while (i > rnp->grphi) in rcu_init_one()
4626 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
4627 per_cpu_ptr(&rcu_data, i)->barrier_head.next = in rcu_init_one()
4628 &per_cpu_ptr(&rcu_data, i)->barrier_head; in rcu_init_one()
4634 * Force priority from the kernel command-line into range.
4658 * the ->node array in the rcu_state structure.
4694 /* If the compile-time values are accurate, just leave. */ in rcu_init_geometry()
4702 * The boot-time rcu_fanout_leaf parameter must be at least two in rcu_init_geometry()
4704 * Complain and fall back to the compile-time values if this in rcu_init_geometry()
4719 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; in rcu_init_geometry()
4723 * If this limit is exceeded, fall back to the compile-time values. in rcu_init_geometry()
4725 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { in rcu_init_geometry()
4738 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; in rcu_init_geometry()
4760 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
4763 level = rnp->level; in rcu_dump_rcu_node_tree()
4765 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()
4788 * We don't need protection against CPU-hotplug here because in rcu_init()
4806 /* -After- the rcu_node ->lock fields are initialized! */ in rcu_init()
4812 // Kick-start in case any polled grace periods started early. in rcu_init()