Lines Matching +full:b +full:- +full:side
1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 * Internal non-public definitions that provide either classic
23 * non-preemptible reads are also safe. NOCB kthreads and in rcu_rdp_is_offloaded()
30 lockdep_is_held(&rdp->nocb_lock) || in rcu_rdp_is_offloaded()
38 return rcu_segcblist_is_offloaded(&rdp->cblist); in rcu_rdp_is_offloaded()
51 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n", in rcu_bootup_announce_oddness()
58 pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n"); in rcu_bootup_announce_oddness()
60 pr_info("\tFour(or more)-level hierarchy is enabled.\n"); in rcu_bootup_announce_oddness()
62 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n", in rcu_bootup_announce_oddness()
65 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", in rcu_bootup_announce_oddness()
74 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit); in rcu_bootup_announce_oddness()
76 pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark); in rcu_bootup_announce_oddness()
78 pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark); in rcu_bootup_announce_oddness()
80 pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld); in rcu_bootup_announce_oddness()
82 …pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs… in rcu_bootup_announce_oddness()
84 …pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next… in rcu_bootup_announce_oddness()
86 …pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sch… in rcu_bootup_announce_oddness()
88 pr_info("\tKick kthreads if too-long grace period.\n"); in rcu_bootup_announce_oddness()
90 pr_info("\tRCU callback double-/use-after-free debug is enabled.\n"); in rcu_bootup_announce_oddness()
92 pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay); in rcu_bootup_announce_oddness()
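The messages above are printed only when the corresponding rcutree module parameters differ from their defaults. As a hedged illustration (the parameter names below are the documented rcutree module parameters; the numeric values are arbitrary examples), booting with a command-line fragment such as

    rcutree.blimit=20 rcutree.qhimark=20000 rcutree.jiffies_till_first_fqs=3 rcutree.gp_preinit_delay=5

would cause rcu_bootup_announce_oddness() to emit the matching "Boot-time adjustment ..." and "RCU debug GP pre-init slowdown ..." lines.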
135 * Queues a task preempted within an RCU-preempt read-side critical
136 * section into the appropriate location within the ->blkd_tasks list,
138 * periods. The ->gp_tasks pointer indicates which element the normal
139 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
142 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
157 * their RCU read-side critical sections. At that point, the ->gp_tasks
158 * pointer will equal the ->exp_tasks pointer, at which point the end of
163 __releases(rnp->lock) /* But leaves rrupts disabled. */ in rcu_preempt_ctxt_queue()
165 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
166 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
167 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
168 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
172 WARN_ON_ONCE(rdp->mynode != rnp); in rcu_preempt_ctxt_queue()
175 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & in rcu_preempt_ctxt_queue()
176 rdp->grpmask); in rcu_preempt_ctxt_queue()
180 * this could be an if-statement. In practice, when I tried in rcu_preempt_ctxt_queue()
192 * GP but not blocking the already-waiting expedited GP. in rcu_preempt_ctxt_queue()
194 * blocking the already-waiting GPs. in rcu_preempt_ctxt_queue()
196 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
214 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
227 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
238 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
250 * block either grace period, update the ->gp_tasks and/or in rcu_preempt_ctxt_queue()
251 * ->exp_tasks pointers, respectively, to reference the newly in rcu_preempt_ctxt_queue()
254 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { in rcu_preempt_ctxt_queue()
255 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
256 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); in rcu_preempt_ctxt_queue()
258 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) in rcu_preempt_ctxt_queue()
259 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
261 !(rnp->qsmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
263 !(rnp->expmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
272 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change. in rcu_preempt_ctxt_queue()
274 if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp) in rcu_preempt_ctxt_queue()
277 WARN_ON_ONCE(rdp->cpu_no_qs.b.exp); in rcu_preempt_ctxt_queue()
278 ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp); in rcu_preempt_ctxt_queue()
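The ordering rule described in the comment before rcu_preempt_ctxt_queue() above, namely that a grace period waiting on a given ->blkd_tasks element also waits on every subsequent element, can be illustrated with a small userspace sketch. This is a deliberately simplified model with a singly linked list and made-up task names, not the kernel's list_head machinery:

    #include <stdio.h>

    struct blkd_task {
            const char *name;
            struct blkd_task *next;         /* simplified stand-in for ->rcu_node_entry linkage */
    };

    /*
     * Everything from the element that gp_tasks references through the end of
     * the list still blocks the normal grace period, mirroring the rule above.
     */
    static void show_gp_blockers(const struct blkd_task *gp_tasks)
    {
            const struct blkd_task *t;

            for (t = gp_tasks; t; t = t->next)
                    printf("normal GP still waits on %s\n", t->name);
    }

    int main(void)
    {
            struct blkd_task c = { "task-c", NULL };
            struct blkd_task b = { "task-b", &c };
            struct blkd_task a = { "task-a", &b };  /* head of the toy list */

            (void)a;                /* queued before the GP started, so not waited on */
            show_gp_blockers(&b);   /* prints task-b and task-c only */
            return 0;
    }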
282 * Record a preemptible-RCU quiescent state for the specified CPU.
285 * grace period need not wait on any RCU read-side critical section that
287 * in an RCU read-side critical section, it has already added itself to
288 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
290 * in an RCU read-side critical section.
292 * Unlike non-preemptible-RCU, quiescent state reports for expedited
301 if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) { in rcu_qs()
305 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); in rcu_qs()
307 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false); in rcu_qs()
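For context, here is a minimal kernel-style sketch of the public RCU API that this quiescent-state bookkeeping ultimately serves; struct foo, global_foo, reader() and updater() are hypothetical names, and the sketch assumes the caller serializes updaters:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int val;
    };

    static struct foo __rcu *global_foo;

    static int reader(void)
    {
            struct foo *p;
            int val = 0;

            rcu_read_lock();                /* under PREEMPT_RCU this section may be preempted */
            p = rcu_dereference(global_foo);
            if (p)
                    val = p->val;
            rcu_read_unlock();
            return val;
    }

    static void updater(int new_val)
    {
            struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
            struct foo *oldp;

            if (!newp)
                    return;
            newp->val = new_val;
            oldp = rcu_dereference_protected(global_foo, 1);        /* updates serialized by caller */
            rcu_assign_pointer(global_foo, newp);
            synchronize_rcu();              /* waits for all pre-existing readers, preempted or not */
            kfree(oldp);
    }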
313 * context-switched away from. If this task is in an RCU read-side
317 * RCU read-side critical section. Therefore, the current grace period
320 * rnp->gp_tasks becomes NULL.
332 …WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side crit… in rcu_note_context_switch()
334 !t->rcu_read_unlock_special.b.blocked) { in rcu_note_context_switch()
336 /* Possibly blocking in an RCU read-side critical section. */ in rcu_note_context_switch()
337 rnp = rdp->mynode; in rcu_note_context_switch()
339 t->rcu_read_unlock_special.b.blocked = true; in rcu_note_context_switch()
340 t->rcu_blocked_node = rnp; in rcu_note_context_switch()
348 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); in rcu_note_context_switch()
350 t->pid, in rcu_note_context_switch()
351 (rnp->qsmask & rdp->grpmask) in rcu_note_context_switch()
352 ? rnp->gp_seq in rcu_note_context_switch()
353 : rcu_seq_snap(&rnp->gp_seq)); in rcu_note_context_switch()
360 * Either we were not in an RCU read-side critical section to in rcu_note_context_switch()
363 * for this CPU. Again, if we were in an RCU read-side critical in rcu_note_context_switch()
369 if (rdp->cpu_no_qs.b.exp) in rcu_note_context_switch()
379 * answer, it must hold the rcu_node's ->lock.
383 return READ_ONCE(rnp->gp_tasks) != NULL; in rcu_preempt_blocked_readers_cgp()
386 /* limit value for ->rcu_read_lock_nesting. */
391 WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1); in rcu_preempt_read_enter()
396 int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1; in rcu_preempt_read_exit()
398 WRITE_ONCE(current->rcu_read_lock_nesting, ret); in rcu_preempt_read_exit()
404 WRITE_ONCE(current->rcu_read_lock_nesting, val); in rcu_preempt_depth_set()
409 * Just increment ->rcu_read_lock_nesting, shared state will be updated
418 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true); in __rcu_read_lock()
425 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
426 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
428 * in an RCU read-side critical section and other special cases.
436 barrier(); // critical-section exit before .s check. in __rcu_read_unlock()
437 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) in __rcu_read_unlock()
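A minimal sketch of the nesting behavior that the fragments above implement, using only the public API; the protected accesses are elided:

    static void nested_reader(void)
    {
            rcu_read_lock();        /* ->rcu_read_lock_nesting: 0 -> 1 */
            rcu_read_lock();        /* 1 -> 2; inner sections add no extra bookkeeping */
            /* ... rcu_dereference()-protected accesses ... */
            rcu_read_unlock();      /* 2 -> 1; not outermost, so no special processing */
            rcu_read_unlock();      /* 1 -> 0; outermost exit may call rcu_read_unlock_special() */
    }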
449 * Advance a ->blkd_tasks-list pointer to the next entry, instead
457 np = t->rcu_node_entry.next; in rcu_next_node_entry()
458 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
465 * preempted within an RCU read-side critical section.
469 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
492 * t->rcu_read_unlock_special cannot change. in rcu_preempt_deferred_qs_irqrestore()
494 special = t->rcu_read_unlock_special; in rcu_preempt_deferred_qs_irqrestore()
496 if (!special.s && !rdp->cpu_no_qs.b.exp) { in rcu_preempt_deferred_qs_irqrestore()
500 t->rcu_read_unlock_special.s = 0; in rcu_preempt_deferred_qs_irqrestore()
501 if (special.b.need_qs) { in rcu_preempt_deferred_qs_irqrestore()
503 rdp->cpu_no_qs.b.norm = false; in rcu_preempt_deferred_qs_irqrestore()
515 * blocked-tasks list below. in rcu_preempt_deferred_qs_irqrestore()
517 if (rdp->cpu_no_qs.b.exp) in rcu_preempt_deferred_qs_irqrestore()
520 /* Clean up if blocked during RCU read-side critical section. */ in rcu_preempt_deferred_qs_irqrestore()
521 if (special.b.blocked) { in rcu_preempt_deferred_qs_irqrestore()
529 rnp = t->rcu_blocked_node; in rcu_preempt_deferred_qs_irqrestore()
531 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_preempt_deferred_qs_irqrestore()
534 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && in rcu_preempt_deferred_qs_irqrestore()
535 (!empty_norm || rnp->qsmask)); in rcu_preempt_deferred_qs_irqrestore()
537 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ in rcu_preempt_deferred_qs_irqrestore()
539 list_del_init(&t->rcu_node_entry); in rcu_preempt_deferred_qs_irqrestore()
540 t->rcu_blocked_node = NULL; in rcu_preempt_deferred_qs_irqrestore()
542 rnp->gp_seq, t->pid); in rcu_preempt_deferred_qs_irqrestore()
543 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_deferred_qs_irqrestore()
544 WRITE_ONCE(rnp->gp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
545 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_deferred_qs_irqrestore()
546 WRITE_ONCE(rnp->exp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
548 /* Snapshot ->boost_mtx ownership w/rnp->lock held. */ in rcu_preempt_deferred_qs_irqrestore()
549 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t; in rcu_preempt_deferred_qs_irqrestore()
550 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_deferred_qs_irqrestore()
551 WRITE_ONCE(rnp->boost_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
557 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, in rcu_preempt_deferred_qs_irqrestore()
563 rnp->gp_seq, in rcu_preempt_deferred_qs_irqrestore()
564 0, rnp->qsmask, in rcu_preempt_deferred_qs_irqrestore()
565 rnp->level, in rcu_preempt_deferred_qs_irqrestore()
566 rnp->grplo, in rcu_preempt_deferred_qs_irqrestore()
567 rnp->grphi, in rcu_preempt_deferred_qs_irqrestore()
568 !!rnp->gp_tasks); in rcu_preempt_deferred_qs_irqrestore()
583 rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex); in rcu_preempt_deferred_qs_irqrestore()
590 * Is a deferred quiescent-state pending, and are we also not in
591 * an RCU read-side critical section? It is the caller's responsibility
600 return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) || in rcu_preempt_need_deferred_qs()
601 READ_ONCE(t->rcu_read_unlock_special.s)) && in rcu_preempt_need_deferred_qs()
608 * not being in an RCU read-side critical section. The caller must
623 * Minimal handler to give the scheduler a chance to re-evaluate.
630 rdp->defer_qs_iw_pending = false; in rcu_preempt_deferred_qs_handler()
636 * read-side critical section.
654 struct rcu_node *rnp = rdp->mynode; in rcu_read_unlock_special()
656 expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) || in rcu_read_unlock_special()
657 (rdp->grpmask & READ_ONCE(rnp->expmask)) || in rcu_read_unlock_special()
659 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) || in rcu_read_unlock_special()
661 t->rcu_blocked_node); in rcu_read_unlock_special()
676 expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) { in rcu_read_unlock_special()
677 // Get scheduler to re-evaluate and call hooks. in rcu_read_unlock_special()
681 rdp->defer_qs_iw = IRQ_WORK_INIT_HARD( in rcu_read_unlock_special()
684 init_irq_work(&rdp->defer_qs_iw, in rcu_read_unlock_special()
686 rdp->defer_qs_iw_pending = true; in rcu_read_unlock_special()
687 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); in rcu_read_unlock_special()
700 * invoked -before- updating this rnp's ->gp_seq.
703 * block the newly created grace period, so set up ->gp_tasks accordingly.
714 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { in rcu_preempt_check_blocked_tasks()
715 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next); in rcu_preempt_check_blocked_tasks()
716 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
718 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"), in rcu_preempt_check_blocked_tasks()
719 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
721 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
754 __this_cpu_read(rcu_data.cpu_no_qs.b.norm) && in rcu_flavor_sched_clock_irq()
755 !t->rcu_read_unlock_special.b.need_qs && in rcu_flavor_sched_clock_irq()
757 t->rcu_read_unlock_special.b.need_qs = true; in rcu_flavor_sched_clock_irq()
761 * Check for a task exiting while in a preemptible-RCU read-side
772 if (unlikely(!list_empty(&current->rcu_node_entry))) { in exit_rcu()
775 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true); in exit_rcu()
786 * Dump the blocked-tasks state, but limit the list dump to the
799 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", in dump_blkd_tasks()
800 __func__, rnp->grplo, rnp->grphi, rnp->level, in dump_blkd_tasks()
801 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs); in dump_blkd_tasks()
802 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in dump_blkd_tasks()
803 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n", in dump_blkd_tasks()
804 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext); in dump_blkd_tasks()
805 pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n", in dump_blkd_tasks()
806 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks), in dump_blkd_tasks()
807 READ_ONCE(rnp->exp_tasks)); in dump_blkd_tasks()
808 pr_info("%s: ->blkd_tasks", __func__); in dump_blkd_tasks()
810 list_for_each(lhp, &rnp->blkd_tasks) { in dump_blkd_tasks()
816 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { in dump_blkd_tasks()
820 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state, in dump_blkd_tasks()
821 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state); in dump_blkd_tasks()
839 rdp->cpu_no_qs.b.norm = false; in rcu_read_unlock_strict()
867 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); in rcu_qs()
868 if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) in rcu_qs()
874 * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
875 * dyntick-idle quiescent state visible to other CPUs, which will in
949 // non-preemptible kernels, there can be no context switches within RCU
950 // read-side critical sections, which in turn means that the leaf rcu_node
951 // structure's blocked-tasks list is always empty. There is therefore no need to
958 if (READ_ONCE(rdp->cpu_no_qs.b.exp)) in rcu_preempt_deferred_qs()
969 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
973 * Check to see if this CPU is in a non-context-switch quiescent state,
987 * references only CPU-local variables that other CPUs in rcu_flavor_sched_clock_irq()
997 * while in preemptible RCU read-side critical sections.
1004 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
1009 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); in dump_blkd_tasks()
1027 WRITE_ONCE(rdp->rcuc_activity, jiffies); in rcu_cpu_kthread_setup()
1033 return rdp->nocb_cb_kthread == current; in rcu_is_callbacks_nocb_kthread()
1040 * Is the current CPU running the RCU-callbacks kthread?
1045 return rdp->rcu_cpu_kthread_task == current || in rcu_is_callbacks_kthread()
1052 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1053 * or ->boost_tasks, advancing the pointer to the next task in the
1054 * ->blkd_tasks list.
1065 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
1066 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
1073 * might exit their RCU read-side critical sections on their own. in rcu_boost()
1075 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1084 * those blocking the pre-existing normal grace period. in rcu_boost()
1086 if (rnp->exp_tasks != NULL) in rcu_boost()
1087 tb = rnp->exp_tasks; in rcu_boost()
1089 tb = rnp->boost_tasks; in rcu_boost()
1095 * exits its outermost RCU read-side critical section. Then in rcu_boost()
1099 * Note that task t must acquire rnp->lock to remove itself from in rcu_boost()
1100 * the ->blkd_tasks list, which it will do from exit() if from in rcu_boost()
1102 * stay around at least until we drop rnp->lock. Note that in rcu_boost()
1103 * rnp->lock also resolves races between our priority boosting in rcu_boost()
1104 * and task t's exiting its outermost RCU read-side critical in rcu_boost()
1108 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); in rcu_boost()
1110 /* Lock only for side effect: boosts task t's priority. */ in rcu_boost()
1111 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1112 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1113 rnp->n_boosts++; in rcu_boost()
1115 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1116 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
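The proxy-lock trick that rcu_boost() uses above can be restated as the following annotated sketch. It keeps the same calls that appear in the fragments, but omits the ->blkd_tasks pointer handling and the rnp->lock release that sit between them, so treat it as commentary rather than standalone code:

    /* t: a reader preempted within its RCU read-side critical section, blocking the GP. */
    rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); /* pretend t already holds boost_mtx */
    rt_mutex_lock(&rnp->boost_mtx);   /* blocks here; priority inheritance boosts t until it
                                       * releases boost_mtx from its outermost rcu_read_unlock() */
    rt_mutex_unlock(&rnp->boost_mtx); /* taken only for the boosting side effect; see rcu_boost() */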
1120 * Priority-boosting kthread, one per leaf rcu_node.
1130 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING); in rcu_boost_kthread()
1132 rcu_wait(READ_ONCE(rnp->boost_tasks) || in rcu_boost_kthread()
1133 READ_ONCE(rnp->exp_tasks)); in rcu_boost_kthread()
1135 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING); in rcu_boost_kthread()
1142 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING); in rcu_boost_kthread()
1156 * blocking the current grace period, and, if so, tell the per-rcu_node
1160 * The caller must hold rnp->lock, which this function releases.
1161 * The ->boost_kthread_task is immortal, so we don't need to worry
1165 __releases(rnp->lock) in rcu_initiate_boost()
1168 if (!rnp->boost_kthread_task || in rcu_initiate_boost()
1169 (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) { in rcu_initiate_boost()
1173 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1174 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1175 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1176 rnp->qsmask == 0 && in rcu_initiate_boost()
1177 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld || in rcu_initiate_boost()
1179 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1180 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks); in rcu_initiate_boost()
1182 rcu_wake_cond(rnp->boost_kthread_task, in rcu_initiate_boost()
1183 READ_ONCE(rnp->boost_kthread_status)); in rcu_initiate_boost()
1192 * Do priority-boost accounting for the start of a new grace period.
1196 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
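The boost-delay test in rcu_initiate_boost() above uses the usual wrap-safe jiffies idiom. As a short worked sketch (maybe_wake_boost_kthread() is a hypothetical placeholder for the wakeup shown in the fragments above):

    unsigned long boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;  /* set at GP start, as above */

    /*
     * time_after(a, b) is true iff a is later than b even across jiffies
     * wraparound, so !time_after(boost_time, jiffies) means the boost
     * delay has fully elapsed.
     */
    if (!time_after(boost_time, jiffies))
            maybe_wake_boost_kthread();     /* hypothetical placeholder */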
1200 * Create an RCU-boost kthread for the specified node if one does not
1206 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_one_boost_kthread()
1210 if (rnp->boost_kthread_task) in rcu_spawn_one_boost_kthread()
1219 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1231 __releases(rnp->lock) in rcu_initiate_boost()
1248 * grace-period kthread will do force_quiescent_state() processing?
1267 * Bind the RCU grace-period kthreads to the housekeeping CPU.