1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
14 * For detailed explanation of Read-Copy Update mechanism see -
67 #include "../time/tick-internal.h"
88 return READ_ONCE(rdp->gpwrap_count); in rcu_get_gpwrap_count()
95 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
119 /* Control rcu_node-tree auto-balancing at boot time. */
138 * boot-time false positives from lockdep-RCU error checking. Finally, it
172 * real-time priority (enabling/disabling) is controlled by
178 /* Delay in jiffies for grace-period initialization delays, debug only. */
207 * for non-zero delays, the overall slowdown of grace periods is constant
217 * structure's ->lock, but of course results can be subject to change.
232 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
233 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
238 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
241 * This is a special-purpose function to be used in the softirq
242 * infrastructure and perhaps the occasional long-running softirq
266 "Illegal rcu_softirq_qs() in RCU read-side critical section"); in rcu_softirq_qs()
276 * to the next non-quiescent value.
278 * The non-atomic test/increment sequence works because the upper bits
279 * of the ->state variable are manipulated only by the corresponding CPU,
299 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
324 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_stopped_since()
339 return false; // Non-zero, so report failure; in rcu_watching_zero_in_eqs()
340 smp_rmb(); // Order *vp read and CT state re-read. in rcu_watching_zero_in_eqs()
349 * quiescent state is urgent, so we burn an atomic operation and full
353 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
370 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
372 * If the current CPU is idle and running at a first-level (not nested)
392 /* Non-idle interrupt or nested idle interrupt */ in rcu_is_cpu_rrupt_from_idle()
397 * Non-nested idle interrupt (interrupting section where RCU in rcu_is_cpu_rrupt_from_idle()
426 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
445 * quiescent-state help from rcu_note_context_switch().
453 * Make sure that we give the grace-period kthread time to detect any
472 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); in adjust_jiffies_till_sched_qs()
482 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); in param_set_first_fqs_jiffies()
494 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); in param_set_next_fqs_jiffies()
547 * Send along grace-period-related data for rcutorture diagnostics.
556 /* Gather grace-period sequence numbers for rcutorture diagnostics. */
565 /* Format grace-period sequence numbers for rcutorture diagnostics. */
579 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
596 * get re-enabled again.
602 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU)) in rcu_irq_work_resched()
605 if (IS_ENABLED(CONFIG_VIRT_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU)) in rcu_irq_work_resched()
618 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
636 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
647 * in a timely manner, the RCU grace-period kthread sets that CPU's
648 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
656 * interrupt or exception. In that case, the RCU grace-period kthread
672 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
673 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
674 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
683 // handler and that the rcu_node lock is an irq-disabled lock in __rcu_irq_enter_check_tick()
684 // prevents self-deadlock. So we can safely recheck under the lock. in __rcu_irq_enter_check_tick()
686 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
687 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
690 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
691 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
693 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
699 * Check to see if any future non-offloaded RCU-related work will need
702 * it is -not- an exported member of the RCU API. This is used by
703 * the idle-entry code to figure out whether it is safe to disable the
704 * scheduler-clock interrupt.
706 * Just check whether or not this CPU has non-offloaded RCU callbacks
711 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && in rcu_needs_cpu()
717 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
722 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
723 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
724 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
725 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
726 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
727 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
732 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
735 * A @true return means that this CPU can safely enter RCU read-side critical sections.
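/*
 * A hedged usage sketch (the probe function and its data are hypothetical):
 * code that may run from unusual contexts can check rcu_is_watching()
 * before relying on RCU read-side protection.
 */
static void my_debug_probe(void)
{
	if (!rcu_is_watching())
		return;		/* RCU would not honor a critical section here */

	rcu_read_lock();
	/* ... access RCU-protected data via rcu_dereference() ... */
	rcu_read_unlock();
}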
758 * If a holdout task is actually running, request an urgent quiescent
778 * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value.
781 * @lag_gps = 0 means we reset it back to the boot-time value.
797 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
804 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag, in rcu_gpnum_ovf()
805 rnp->gp_seq)) { in rcu_gpnum_ovf()
806 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
807 WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1); in rcu_gpnum_ovf()
809 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
810 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
831 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_save()
832 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) { in rcu_watching_snap_save()
833 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_save()
834 rcu_gpnum_ovf(rdp->mynode, rdp); in rcu_watching_snap_save()
858 struct rcu_node *rnp = rdp->mynode; in rcu_watching_snap_recheck()
865 * read-side critical section that started before the beginning in rcu_watching_snap_recheck()
868 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) { in rcu_watching_snap_recheck()
869 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_recheck()
878 * the CPU-offline process, or, failing that, by rcu_gp_init() in rcu_watching_snap_recheck()
880 * last task on a leaf rcu_node structure exiting its RCU read-side in rcu_watching_snap_recheck()
885 * The rcu_node structure's ->lock is held here, which excludes in rcu_watching_snap_recheck()
886 * the relevant portions of the CPU-hotplug code, the grace-period in rcu_watching_snap_recheck()
895 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", in rcu_watching_snap_recheck()
896 __func__, rnp->grplo, rnp->grphi, rnp->level, in rcu_watching_snap_recheck()
897 (long)rnp->gp_seq, (long)rnp->completedqs); in rcu_watching_snap_recheck()
898 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in rcu_watching_snap_recheck()
899 …pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n… in rcu_watching_snap_recheck()
900 …__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rc… in rcu_watching_snap_recheck()
902 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], in rcu_watching_snap_recheck()
903 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state, in rcu_watching_snap_recheck()
904 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state); in rcu_watching_snap_recheck()
913 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs in rcu_watching_snap_recheck()
920 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && in rcu_watching_snap_recheck()
924 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); in rcu_watching_snap_recheck()
926 smp_store_release(&rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
928 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
932 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! in rcu_watching_snap_recheck()
934 * And some in-kernel loops check need_resched() before calling in rcu_watching_snap_recheck()
936 * running in-kernel with scheduling-clock interrupts disabled. in rcu_watching_snap_recheck()
939 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_watching_snap_recheck()
940 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_watching_snap_recheck()
942 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
943 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
944 ret = -1; in rcu_watching_snap_recheck()
948 * If more than halfway to RCU CPU stall-warning time, invoke in rcu_watching_snap_recheck()
956 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_watching_snap_recheck()
957 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
958 ret = -1; in rcu_watching_snap_recheck()
961 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_watching_snap_recheck()
962 (rnp->ffmask & rdp->grpmask)) { in rcu_watching_snap_recheck()
963 rdp->rcu_iw_pending = true; in rcu_watching_snap_recheck()
964 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_watching_snap_recheck()
965 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_watching_snap_recheck()
968 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { in rcu_watching_snap_recheck()
969 int cpu = rdp->cpu; in rcu_watching_snap_recheck()
975 rsrp = &rdp->snap_record; in rcu_watching_snap_recheck()
976 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu); in rcu_watching_snap_recheck()
977 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu); in rcu_watching_snap_recheck()
978 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); in rcu_watching_snap_recheck()
979 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu); in rcu_watching_snap_recheck()
980 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu); in rcu_watching_snap_recheck()
981 rsrp->nr_csw = nr_context_switches_cpu(cpu); in rcu_watching_snap_recheck()
982 rsrp->jiffies = jiffies; in rcu_watching_snap_recheck()
983 rsrp->gp_seq = rdp->gp_seq; in rcu_watching_snap_recheck()
990 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
994 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in trace_rcu_this_gp()
995 gp_seq_req, rnp->level, in trace_rcu_this_gp()
996 rnp->grplo, rnp->grphi, s); in trace_rcu_this_gp()
1000 * rcu_start_this_gp - Request the start of a particular grace period
1007 * rcu_node structure's ->gp_seq_needed field. Returns true if there
1008 * is reason to awaken the grace-period kthread.
1010 * The caller must hold the specified rcu_node structure's ->lock, which
1011 * is why the caller is responsible for waking the grace-period kthread.
1024 * has already been recorded -- or if that grace period has in in rcu_start_this_gp()
1026 * progress in a non-leaf node, no recording is needed because the in rcu_start_this_gp()
1028 * Note that rnp_start->lock must not be released. in rcu_start_this_gp()
1032 for (rnp = rnp_start; 1; rnp = rnp->parent) { in rcu_start_this_gp()
1035 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || in rcu_start_this_gp()
1036 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || in rcu_start_this_gp()
1038 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { in rcu_start_this_gp()
1043 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); in rcu_start_this_gp()
1044 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { in rcu_start_this_gp()
1055 if (rnp != rnp_start && rnp->parent != NULL) in rcu_start_this_gp()
1057 if (!rnp->parent) in rcu_start_this_gp()
1077 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { in rcu_start_this_gp()
1078 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1079 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1087 * Clean up any old requests for the just-ended grace period. Also return
1095 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); in rcu_future_gp_cleanup()
1097 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ in rcu_future_gp_cleanup()
1098 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1104 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1106 * sleep upon return, resulting in a grace-period hang), and don't bother
1107 * awakening when there is nothing for the grace-period kthread to do
1112 * So why do the self-wakeup when in an interrupt or softirq handler
1113 * in the grace-period kthread's context? Because the kthread might have
1115 * pre-sleep check of the awaken condition. In this case, a wakeup really
1131 * If there is room, assign a ->gp_seq number to any callbacks on this
1133 * that were previously assigned a ->gp_seq number that has since proven
1135 * ->gp_seq number while RCU is idle, but with reference to a non-root
1138 * the RCU grace-period kthread.
1140 * The caller must hold rnp->lock with interrupts disabled.
1151 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1154 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1157 * Callbacks are often registered with incomplete grace-period in rcu_accelerate_cbs()
1163 * accelerating callback invocation to an earlier grace-period in rcu_accelerate_cbs()
1167 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1171 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1176 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1183 * rcu_node structure's ->lock be held. It consults the cached value
1184 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1185 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1186 * while holding the leaf rcu_node structure's ->lock.
1196 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1198 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1211 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1213 * invoke it repeatedly. As long as it is not invoked -too- often...
1214 * Returns true if the RCU grace-period kthread needs to be awakened.
1216 * The caller must hold rnp->lock with interrupts disabled.
1224 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1228 * Find all callbacks whose ->gp_seq numbers indicate that they in rcu_advance_cbs()
1231 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1239 * that the RCU grace-period kthread be awakened.
1245 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) in rcu_advance_cbs_nowake()
1248 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) in rcu_advance_cbs_nowake()
1267 * Update CPU-local rcu_data state to record the beginnings and ends of
1268 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1270 * Returns true if the grace-period kthread needs to be awakened.
1280 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1284 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1285 unlikely(rdp->gpwrap)) { in __note_gp_changes()
1288 rdp->core_needs_qs = false; in __note_gp_changes()
1289 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1293 if (rdp->core_needs_qs) in __note_gp_changes()
1294 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1297 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ in __note_gp_changes()
1298 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1299 unlikely(rdp->gpwrap)) { in __note_gp_changes()
1305 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1306 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1307 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1308 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1311 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1312 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1313 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1314 if (IS_ENABLED(CONFIG_PROVE_RCU) && rdp->gpwrap) in __note_gp_changes()
1315 WRITE_ONCE(rdp->last_sched_clock, jiffies); in __note_gp_changes()
1316 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1328 rnp = rdp->mynode; in note_gp_changes()
1329 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1330 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1344 /* Register a counter to suppress debugging grace-period delays. */
1378 /* Allow rcutorture to stall the grace-period kthread. */
1435 // end of that GP. Either way, zero counter to avoid counter-wrap in rcu_poll_gp_seq_end()
1501 * in the llist should be used as a wait-tail for this
1514 * +----------+ +--------+ +-------+
1516 * | head |---------> | cb2 |--------->| cb1 |
1518 * +----------+ +--------+ +-------+
1528 * +----------+ +--------+ +--------+ +-------+
1530 * | head ------> wait |------> cb2 |------> | cb1 |
1532 * +----------+ +--------+ +--------+ +-------+
1544 * +----------+ +--------+ +--------+ +-------+
1546 * | head ------> wait |------> cb2 |------> | cb1 |
1548 * +----------+ +--------+ +--------+ +-------+
1558 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1560 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1562 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1573 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1575 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1577 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1584 * to use the rel-acq semantics. If the concurrent kworker
1599 * +----------+ +--------+
1601 * | head ------> wait |
1603 * +----------+ +--------+
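/*
 * A simplified, self-contained sketch of the wait-head pattern pictured
 * above (this is not the kernel's srs_* machinery; the request type, list,
 * and helpers are hypothetical, and <linux/llist.h> plus
 * <linux/completion.h> are assumed). Callers push request nodes onto a
 * lock-free llist, and the grace-period path pushes a dummy "wait head"
 * so that every node behind it is known to be covered by that GP.
 */
struct my_gp_request {
	struct llist_node node;
	struct completion done;
};

static LLIST_HEAD(my_gp_requests);		/* pending requests */
static struct llist_node my_gp_wait_head;	/* dummy separator node */

static void my_gp_request_add(struct my_gp_request *req)
{
	init_completion(&req->done);
	llist_add(&req->node, &my_gp_requests);	/* lock-free enqueue */
}

static void my_gp_start(void)
{
	/* Everything enqueued before this separator belongs to this GP. */
	llist_add(&my_gp_wait_head, &my_gp_requests);
}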
1609 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node; in rcu_sr_is_wait_head()
1620 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1)) in rcu_sr_get_wait_head()
1621 return &sr_wn->node; in rcu_sr_get_wait_head()
1631 atomic_set_release(&sr_wn->inuse, 0); in rcu_sr_put_wait_head()
1637 static int rcu_normal_wake_from_gp = -1;
1647 !poll_state_synchronize_rcu_full(&rs->oldstate), in rcu_sr_normal_complete()
1651 complete(&rs->completion); in rcu_sr_normal_complete()
1663 * follow acq-rel semantics. in rcu_sr_normal_gp_cleanup_work()
1674 head = done->next; in rcu_sr_normal_gp_cleanup_work()
1675 done->next = NULL; in rcu_sr_normal_gp_cleanup_work()
1679 * done tail which is acq-read above is not removed in rcu_sr_normal_gp_cleanup_work()
1718 llist_for_each_safe(rcu, next, wait_tail->next) { in rcu_sr_normal_gp_cleanup()
1724 wait_tail->next = next; in rcu_sr_normal_gp_cleanup()
1732 * wait head if no in-flight workers remain. If there are in-flight workers, in rcu_sr_normal_gp_cleanup()
1737 if (wait_tail->next && wait_tail->next->next == NULL && in rcu_sr_normal_gp_cleanup()
1738 rcu_sr_is_wait_head(wait_tail->next) && in rcu_sr_normal_gp_cleanup()
1740 rcu_sr_put_wait_head(wait_tail->next); in rcu_sr_normal_gp_cleanup()
1741 wait_tail->next = NULL; in rcu_sr_normal_gp_cleanup()
1750 * of outstanding users (if any are still left) and releasing wait-heads in rcu_sr_normal_gp_cleanup()
1753 if (wait_tail->next) { in rcu_sr_normal_gp_cleanup()
1780 /* Inject a wait-dummy-node. */ in rcu_sr_normal_gp_init()
1785 * this step, since a GP-kthread, rcu_gp_init() -> gp_cleanup(), in rcu_sr_normal_gp_init()
1797 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next); in rcu_sr_normal_add_req()
1844 * use-after-free errors. For a detailed explanation of this race, see in rcu_gp_init()
1849 * the rcu_state's gp_seq, for a reason. See the Quick-Quiz on in rcu_gp_init()
1850 * Single-node systems for more details (in Data-Structures.rst). in rcu_gp_init()
1866 * separator to the llist, because there were no dummy-nodes left. in rcu_gp_init()
1868 * The number of dummy-nodes is fixed, so it could be that we have run out of in rcu_gp_init()
1876 * Apply per-leaf buffered online and offline operations to in rcu_gp_init()
1894 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1895 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1903 /* Record old state, apply changes to ->qsmaskinit field. */ in rcu_gp_init()
1904 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1905 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1907 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ in rcu_gp_init()
1908 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1910 if (!rnp->wait_blkd_tasks) /* Ever offline? */ in rcu_gp_init()
1913 rnp->wait_blkd_tasks = true; /* blocked tasks */ in rcu_gp_init()
1920 * If all waited-on tasks from prior grace period are in rcu_gp_init()
1923 * clear ->wait_blkd_tasks. Otherwise, if one of this in rcu_gp_init()
1925 * simply clear ->wait_blkd_tasks. in rcu_gp_init()
1927 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1928 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { in rcu_gp_init()
1929 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1930 if (!rnp->qsmaskinit) in rcu_gp_init()
1941 * Set the quiescent-state-needed bits in all the rcu_node in rcu_gp_init()
1942 * structures for all currently online CPUs in breadth-first in rcu_gp_init()
1958 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1959 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); in rcu_gp_init()
1960 if (rnp == rdp->mynode) in rcu_gp_init()
1963 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, in rcu_gp_init()
1964 rnp->level, rnp->grplo, in rcu_gp_init()
1965 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1967 * Quiescent states for tasks on any now-offline CPUs. Since we in rcu_gp_init()
1972 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
1973 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
1974 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
1975 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_gp_init()
1990 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
2001 // Someone like call_rcu() requested a force-quiescent-state scan. in rcu_gp_fqs_check_wake()
2007 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
2014 * Do one round of quiescent-state forcing.
2031 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs); in rcu_gp_fqs()
2035 /* Collect dyntick-idle snapshots. */ in rcu_gp_fqs()
2038 /* Handle dyntick-idle and offline CPUs. */ in rcu_gp_fqs()
2041 /* Clear flag to prevent immediate re-entry. */ in rcu_gp_fqs()
2050 * Loop doing repeated quiescent-state forcing until the grace period ends.
2091 * is required only for single-node rcu_node trees because readers blocking in rcu_gp_fqs_loop()
2093 * For multi-node trees, checking the root node's ->qsmask suffices, because a in rcu_gp_fqs_loop()
2094 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from in rcu_gp_fqs_loop()
2097 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_fqs_loop()
2100 /* If time for quiescent-state forcing, do it. */ in rcu_gp_fqs_loop()
2129 j = rcu_state.jiffies_force_qs - j; in rcu_gp_fqs_loop()
2152 gp_duration = rcu_state.gp_end - rcu_state.gp_start; in rcu_gp_cleanup()
2168 * Propagate new ->gp_seq value to rcu_node structures so that in rcu_gp_cleanup()
2171 * RCU grace-period initialization races by forcing the end of in rcu_gp_cleanup()
2182 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
2183 WRITE_ONCE(rnp->gp_seq, new_gp_seq); in rcu_gp_cleanup()
2184 if (!rnp->parent) in rcu_gp_cleanup()
2187 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2189 /* smp_mb() provided by prior unlock-lock pair. */ in rcu_gp_cleanup()
2193 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { in rcu_gp_cleanup()
2205 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ in rcu_gp_cleanup()
2214 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { in rcu_gp_cleanup()
2215 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2225 // the RCU_GP_FLAG_INIT bit in ->gp_state (which records in rcu_gp_cleanup()
2229 // hold the ->nocb_lock needed to safely access an offloaded in rcu_gp_cleanup()
2230 // ->cblist. We do not want to acquire that lock because in rcu_gp_cleanup()
2240 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. in rcu_gp_cleanup()
2242 // ->gp_flags bits. in rcu_gp_cleanup()
2264 /* Handle grace-period start. */ in rcu_gp_kthread()
2284 /* Handle quiescent-state forcing. */ in rcu_gp_kthread()
2287 /* Handle grace-period end. */ in rcu_gp_kthread()
2296 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2297 * another grace period is required. Whether we wake the grace-period
2298 * kthread or it awakens itself for the next round of quiescent-state
2299 * forcing, that kthread will clean up after the just-completed grace
2300 * period. Note that the caller must hold rnp->lock, which is released
2304 __releases(rcu_get_root()->lock) in rcu_report_qs_rsp()
2319 * is the grace-period snapshot, which means that the quiescent states
2320 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2323 * As a special case, if mask is zero, the bit-already-cleared check is
2325 * during grace-period initialization.
2329 __releases(rnp->lock) in rcu_report_qs_rnp()
2338 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
2350 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); in rcu_report_qs_rnp()
2351 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, in rcu_report_qs_rnp()
2352 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2353 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2354 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2355 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2361 rnp->completedqs = rnp->gp_seq; in rcu_report_qs_rnp()
2362 mask = rnp->grpmask; in rcu_report_qs_rnp()
2363 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2371 rnp = rnp->parent; in rcu_report_qs_rnp()
2373 oldmask = READ_ONCE(rnp_c->qsmask); in rcu_report_qs_rnp()
2381 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ in rcu_report_qs_rnp()
2387 * RCU grace period. The caller must hold the corresponding rnp->lock with
2393 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2402 rnp->qsmask != 0) { in rcu_report_unblock_qs_rnp()
2407 rnp->completedqs = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2408 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2418 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ in rcu_report_unblock_qs_rnp()
2419 gps = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2420 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2437 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2438 rnp = rdp->mynode; in rcu_report_qs_rdp()
2440 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2441 rdp->gpwrap) { in rcu_report_qs_rdp()
2449 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2453 mask = rdp->grpmask; in rcu_report_qs_rdp()
2454 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2455 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2474 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_qs_rdp()
2475 /* ^^^ Released rnp->lock */ in rcu_report_qs_rdp()
2488 /* Check for grace-period ends and beginnings. */ in rcu_check_quiescent_state()
2495 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2502 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2512 /* Return true if callback-invocation time limit exceeded. */
2526 * period. Throttle as specified by rdp->blimit.
2543 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2545 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2547 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2559 * completion (materialized by rnp->gp_seq update) thanks to the in rcu_do_batch()
2566 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); in rcu_do_batch()
2568 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; in rcu_do_batch()
2569 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2570 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && in rcu_do_batch()
2581 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2582 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2584 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2586 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
2602 f = rhp->func; in rcu_do_batch()
2604 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); in rcu_do_batch()
2629 // But rcuc kthreads can delay quiescent-state in rcu_do_batch()
2631 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && in rcu_do_batch()
2633 rdp->rcu_cpu_has_work = 1; in rcu_do_batch()
2640 rdp->n_cbs_invoked += count; in rcu_do_batch()
2645 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2646 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2649 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2650 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2651 rdp->blimit = blimit; in rcu_do_batch()
2653 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ in rcu_do_batch()
2654 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2655 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2656 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2657 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2658 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2664 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2668 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2669 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2677 * This function is invoked from each scheduling-clock interrupt,
2678 * and checks to see if this CPU is in a non-context-switch quiescent
2693 trace_rcu_utilization(TPS("Start scheduler-tick")); in rcu_sched_clock_irq()
2696 /* The load-acquire pairs with the store-release setting to true. */ in rcu_sched_clock_irq()
2712 trace_rcu_utilization(TPS("End scheduler-tick")); in rcu_sched_clock_irq()
2736 rcu_state.cbovldnext |= !!rnp->cbovldmask; in force_qs_rnp()
2737 if (rnp->qsmask == 0) { in force_qs_rnp()
2742 * priority-boost blocked readers. in force_qs_rnp()
2745 /* rcu_initiate_boost() releases rnp->lock */ in force_qs_rnp()
2751 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { in force_qs_rnp()
2758 mask |= rdp->grpmask; in force_qs_rnp()
2762 rsmask |= rdp->grpmask; in force_qs_rnp()
2765 /* Idle/offline CPUs, report (releases rnp->lock). */ in force_qs_rnp()
2766 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in force_qs_rnp()
2779 * CPUs are in dyntick-idle mode.
2792 for (; rnp != NULL; rnp = rnp->parent) { in rcu_force_quiescent_state()
2794 !raw_spin_trylock(&rnp->fqslock); in rcu_force_quiescent_state()
2796 raw_spin_unlock(&rnp_old->fqslock); in rcu_force_quiescent_state()
2805 raw_spin_unlock(&rnp_old->fqslock); in rcu_force_quiescent_state()
2829 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2834 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2849 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) { in rcu_core()
2851 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2859 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2862 /* Re-invoke RCU core processing if there are callbacks remaining. */ in rcu_core()
2863 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_core()
2873 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2928 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2975 * Spawn per-CPU RCU core processing kthreads.
2992 rcu_segcblist_enqueue(&rdp->cblist, head); in rcutree_enqueue()
2994 rcu_segcblist_n_cbs(&rdp->cblist)); in rcutree_enqueue()
2995 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in rcutree_enqueue()
2999 * Handle any core-RCU processing required by a call_rcu() invocation.
3007 * core in order to force a re-evaluation of RCU's idleness. in call_rcu_core()
3023 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in call_rcu_core()
3024 rdp->qlen_last_fqs_check + qhimark)) { in call_rcu_core()
3031 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in call_rcu_core()
3034 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in call_rcu_core()
3035 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in call_rcu_core()
3036 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in call_rcu_core()
3038 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in call_rcu_core()
3039 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in call_rcu_core()
3053 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3055 * structure's ->lock.
3062 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
3063 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
3065 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
3070 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3076 * grace periods. This omission is due to the need for no-CBs CPUs to
3077 * be holding ->nocb_lock to do this check, which is too heavy for a
3078 * common-case operation.
3082 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
3085 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
3086 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
3102 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); in __call_rcu_common()
3115 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); in __call_rcu_common()
3118 WRITE_ONCE(head->func, rcu_leak_callback); in __call_rcu_common()
3121 head->func = func; in __call_rcu_common()
3122 head->next = NULL; in __call_rcu_common()
3132 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu_common()
3138 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu_common()
3139 rcu_segcblist_init(&rdp->cblist); in __call_rcu_common()
3156 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
3157 * flush all lazy callbacks (including the new one) to the main ->cblist while
3164 * period elapses, in other words after all pre-existing RCU read-side
3185 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3187 * ->cblist to prevent starting of grace periods too soon.
3194 * period elapses, in other words after all pre-existing RCU read-side
3196 * might well execute concurrently with RCU read-side critical sections
3205 * RCU read-side critical sections are delimited by rcu_read_lock()
3208 * or softirqs have been disabled also serve as RCU read-side critical
3213 * all pre-existing RCU read-side critical sections. On systems with more
3216 * last RCU read-side critical section whose beginning preceded the call
3217 * to call_rcu(). It also means that each CPU executing an RCU read-side
3220 * of that RCU read-side critical section. Note that these guarantees
3227 * between the call to call_rcu() and the invocation of "func()" -- even
3231 * Implementation of these memory-ordering guarantees is described here:
3232 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3237 * callback. This delay can significantly improve energy-efficiency
3238 * on low-utilization battery-powered devices. To avoid this delay,
3239 * in latency-sensitive kernel code, use call_rcu_hurry().
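/*
 * A classic call_rcu() usage sketch (struct foo, its list, and the helpers
 * are hypothetical; the usual <linux/rculist.h> and <linux/slab.h>
 * facilities are assumed).
 */
struct foo {
	struct list_head list;
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);			/* runs only after a full grace period */
}

static void foo_remove(struct foo *fp)
{
	list_del_rcu(&fp->list);	/* caller holds the update-side lock */
	call_rcu(&fp->rcu, foo_reclaim);/* defer the free past all readers */
}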
3248 * During early boot, any blocking grace-period wait automatically
3255 * grace-period optimization is ignored once the scheduler is running.
3304 * synchronize_rcu - wait until a grace period has elapsed.
3308 * read-side critical sections have completed. Note, however, that
3310 * concurrently with new RCU read-side critical sections that began while
3313 * RCU read-side critical sections are delimited by rcu_read_lock()
3316 * or softirqs have been disabled also serve as RCU read-side critical
3320 * Note that this guarantee implies further memory-ordering guarantees.
3323 * the end of its last RCU read-side critical section whose beginning
3325 * an RCU read-side critical section that extends beyond the return from
3328 * that RCU read-side critical section. Note that these guarantees include
3335 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3338 * Implementation of these memory-ordering guarantees is described here:
3339 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3349 "Illegal synchronize_rcu() in RCU read-side critical section"); in synchronize_rcu()
3363 // reuse of ->gp_seq_polled_snap. in synchronize_rcu()
3367 // Update the normal grace-period counters to record in synchronize_rcu()
3374 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) in synchronize_rcu()
3375 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in synchronize_rcu()
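/*
 * A minimal update-side sketch using synchronize_rcu() (struct foo is the
 * hypothetical type from the call_rcu() sketch above, and foo_lock is a
 * hypothetical update-side lock).
 */
static DEFINE_SPINLOCK(foo_lock);

static void foo_remove_sync(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);
	spin_unlock(&foo_lock);

	synchronize_rcu();		/* wait out all pre-existing readers */
	kfree(fp);			/* no reader can still hold a reference */
}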
3381 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3390 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED; in get_completed_synchronize_rcu_full()
3391 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED; in get_completed_synchronize_rcu_full()
3396 * get_state_synchronize_rcu - Snapshot current RCU state
3405 * Any prior manipulation of RCU-protected data must happen in get_state_synchronize_rcu()
3406 * before the load from ->gp_seq. in get_state_synchronize_rcu()
3414 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3415 * @rgosp: location to place combined normal/expedited grace-period state
3417 * Places the normal and expedited grace-period states in @rgosp. This
3432 * Any prior manipulation of RCU-protected data must happen in get_state_synchronize_rcu_full()
3433 * before the loads from ->gp_seq and ->expedited_sequence. in get_state_synchronize_rcu_full()
3437 // Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use in get_state_synchronize_rcu_full()
3439 // the latter here would result in too-short grace periods due to in get_state_synchronize_rcu_full()
3441 rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq); in get_state_synchronize_rcu_full()
3442 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); in get_state_synchronize_rcu_full()
3459 rnp = rdp->mynode; in start_poll_synchronize_rcu_common()
3466 // from which they are updated at grace-period start, as required. in start_poll_synchronize_rcu_common()
3474 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3492 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3495 * Places the normal and expedited grace-period states in *@rgos. This
3511 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3525 * more than a billion grace periods (and way more on a 64-bit system!).
3527 * (many hours even on 32-bit systems) should check them occasionally and
3530 * to get a guaranteed-completed grace-period state.
3532 * In addition, because oldstate compresses the grace-period state for
3538 * This function provides the same memory-ordering guarantees that
3555 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3567 * for more than a billion grace periods (and way more on a 64-bit
3569 * long time periods (many hours even on 32-bit systems) should check
3572 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3573 * grace-period state.
3575 * This function provides the same memory-ordering guarantees that would
3579 * ->gp_seq field be checked instead of that of the rcu_state structure.
3580 * The problem is that the just-ending grace-period's callbacks can be
3581 * invoked between the time that the root rcu_node structure's ->gp_seq
3582 * field is updated and the time that the rcu_state structure's ->gp_seq
3591 smp_mb(); // Order against root rcu_node structure grace-period cleanup. in poll_state_synchronize_rcu_full()
3592 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED || in poll_state_synchronize_rcu_full()
3593 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) || in poll_state_synchronize_rcu_full()
3594 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED || in poll_state_synchronize_rcu_full()
3595 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { in poll_state_synchronize_rcu_full()
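/*
 * A polling sketch using the single-unsigned-long API documented above:
 * start a grace period without blocking, then check back while doing
 * other work. my_do_other_work() is hypothetical.
 */
static void my_poll_for_gp(void)
{
	unsigned long cookie = start_poll_synchronize_rcu();

	while (!poll_state_synchronize_rcu(cookie))
		my_do_other_work();	/* make progress instead of sleeping */
	/* A full grace period has elapsed since the cookie was taken. */
}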
3604 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3613 * more than 2 billion grace periods (and way more on a 64-bit system!),
3616 * This function provides the same memory-ordering guarantees that
3629 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3640 * more than 2 billion grace periods (and way more on a 64-bit system!),
3643 * This function provides the same memory-ordering guarantees that
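/*
 * A conditional-wait sketch: snapshot the grace-period state early, do
 * unrelated work, and block only if no grace period has elapsed in the
 * meantime. my_time_consuming_setup() is hypothetical.
 */
static void my_update(void)
{
	unsigned long cookie = get_state_synchronize_rcu();

	my_time_consuming_setup();
	cond_synchronize_rcu(cookie);	/* often returns without blocking */
}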
3656 * Check to see if there is any immediate RCU-related work to be done by
3659 * CPU-local state are performed first. However, we must check for CPU
3666 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3687 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3692 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3696 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3698 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3702 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3703 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3734 rhp->next = rhp; // Mark the callback as having been invoked. in rcu_barrier_callback()
3736 rcu_barrier_trace(TPS("LastCB"), -1, s); in rcu_barrier_callback()
3739 rcu_barrier_trace(TPS("CB"), -1, s); in rcu_barrier_callback()
3744 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3749 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); in rcu_barrier_entrain()
3756 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3757 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_entrain()
3758 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_entrain()
3765 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3767 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3768 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_entrain()
3771 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_entrain()
3772 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3777 smp_store_release(&rdp->barrier_seq_snap, gseq); in rcu_barrier_entrain()
3781 * Called with preemption disabled, and from cross-cpu IRQ context.
3789 WARN_ON_ONCE(cpu != rdp->cpu); in rcu_barrier_handler()
3797 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3817 rcu_barrier_trace(TPS("Begin"), -1, s); in rcu_barrier()
3824 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3834 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3838 * to avoid a too-soon return to zero in case of an immediate in rcu_barrier()
3839 * invocation of the just-enqueued callback (or preemption of in rcu_barrier()
3840 * this task). Exclude CPU-hotplug operations to ensure that no in rcu_barrier()
3841 * offline non-offloaded CPU has callbacks queued. in rcu_barrier()
3855 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) in rcu_barrier()
3858 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { in rcu_barrier()
3859 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
3866 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
3876 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
3891 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3897 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
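/*
 * A typical module-unload sketch: ensure that every callback this module
 * queued via call_rcu() has been invoked before its code and data go away.
 * The exit function and teardown helpers are hypothetical.
 */
static void __exit my_module_exit(void)
{
	my_stop_queueing_callbacks();	/* hypothetical: no new call_rcu() after this */
	rcu_barrier();			/* wait for all already-queued callbacks */
	my_free_module_resources();	/* hypothetical */
}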
3908 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3913 * rcu_barrier() system-wide from use of this function, which means that
3956 return -EAGAIN; in param_set_do_rcu_barrier()
3959 atomic_inc((atomic_t *)kp->arg); in param_set_do_rcu_barrier()
3961 atomic_dec((atomic_t *)kp->arg); in param_set_do_rcu_barrier()
3971 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg)); in param_get_do_rcu_barrier()
3983 * This will not be stable unless the rcu_node structure's ->lock is
3989 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
3995 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3999 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); in rcu_rdp_cpu_online()
4035 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask in rcu_lockdep_current_cpu_online()
4058 * and all tasks that were preempted within an RCU read-side critical
4060 * read-side critical section. Some other CPU is reporting this fact with
4061 * the specified rcu_node structure's ->lock held and interrupts disabled.
4063 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
4064 * the leaf rcu_node structure's ->qsmaskinit field has already been
4080 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || in rcu_cleanup_dead_rnp()
4084 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
4085 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
4089 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
4091 WARN_ON_ONCE(rnp->qsmask); in rcu_cleanup_dead_rnp()
4092 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
4102 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4104 * must hold the corresponding leaf rcu_node ->lock with interrupts
4114 WARN_ON_ONCE(rnp->wait_blkd_tasks); in rcu_init_new_rnp()
4116 mask = rnp->grpmask; in rcu_init_new_rnp()
4117 rnp = rnp->parent; in rcu_init_new_rnp()
4121 oldmask = rnp->qsmaskinit; in rcu_init_new_rnp()
4122 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
4130 * Do boot-time initialization of a CPU's per-CPU RCU data.
4139 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4140 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4141 WARN_ON_ONCE(ct->nesting != 1); in rcu_boot_init_percpu_data()
4143 rdp->barrier_seq_snap = rcu_state.barrier_sequence; in rcu_boot_init_percpu_data()
4144 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4145 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4146 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4147 rdp->rcu_onl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4148 rdp->last_sched_clock = jiffies; in rcu_boot_init_percpu_data()
4149 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4176 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_exp_par_gp_kworker()
4178 if (rnp->exp_kworker) in rcu_spawn_exp_par_gp_kworker()
4184 rnp->grplo, rnp->grphi); in rcu_spawn_exp_par_gp_kworker()
4187 WRITE_ONCE(rnp->exp_kworker, kworker); in rcu_spawn_exp_par_gp_kworker()
4190 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, ¶m); in rcu_spawn_exp_par_gp_kworker()
4192 rcu_thread_affine_rnp(kworker->task, rnp); in rcu_spawn_exp_par_gp_kworker()
4193 wake_up_process(kworker->task); in rcu_spawn_exp_par_gp_kworker()
4209 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, ¶m); in rcu_start_exp_gp_kworker()
4215 mutex_lock(&rnp->kthread_mutex); in rcu_spawn_rnp_kthreads()
4218 mutex_unlock(&rnp->kthread_mutex); in rcu_spawn_rnp_kthreads()
4223 * Invoked early in the CPU-online process, when pretty much all services
4226 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4228 * accept some slop in the rsp->gp_seq access due to the fact that this
4229 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4241 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4242 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4243 rdp->blimit = blimit; in rcutree_prepare_cpu()
4244 ct->nesting = 1; /* CPU not up, no tearing. */ in rcutree_prepare_cpu()
4248 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be in rcutree_prepare_cpu()
4249 * (re-)initialized. in rcutree_prepare_cpu()
4251 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4252 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4255 * Add CPU to leaf rcu_node pending-online bitmask. Any needed in rcutree_prepare_cpu()
4259 rnp = rdp->mynode; in rcutree_prepare_cpu()
4261 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4262 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4263 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4264 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4265 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4266 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
4267 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
4268 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4287 return smp_load_acquire(&rdp->beenonline); in rcu_cpu_beenfullyonline()
4291 * Near the end of the CPU-online process. Pretty much all services
4301 rnp = rdp->mynode; in rcutree_online_cpu()
4303 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4308 // Stop-machine done, so allow nohz_full to disable tick. in rcutree_online_cpu()
4316 * incoming CPUs are not allowed to use RCU read-side critical sections
4336 if (rdp->cpu_started) in rcutree_report_cpu_starting()
4338 rdp->cpu_started = true; in rcutree_report_cpu_starting()
4340 rnp = rdp->mynode; in rcutree_report_cpu_starting()
4341 mask = rdp->grpmask; in rcutree_report_cpu_starting()
4346 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); in rcutree_report_cpu_starting()
4348 newcpu = !(rnp->expmaskinitnext & mask); in rcutree_report_cpu_starting()
4349 rnp->expmaskinitnext |= mask; in rcutree_report_cpu_starting()
4353 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcutree_report_cpu_starting()
4354 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_starting()
4355 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_starting()
4358 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ in rcutree_report_cpu_starting()
4364 /* Report QS -after- changing ->qsmaskinitnext! */ in rcutree_report_cpu_starting()
4365 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcutree_report_cpu_starting()
4370 smp_store_release(&rdp->beenonline, true); in rcutree_report_cpu_starting()
4371 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ in rcutree_report_cpu_starting()
4376 * the rcu_node tree's ->qsmaskinitnext bit masks.
4389 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_report_cpu_dead()
4393 * may introduce a new read-side critical section while it is actually off the QS masks. in rcutree_report_cpu_dead()
4401 WARN_ON_ONCE(rdp->cpu_no_qs.b.exp); in rcutree_report_cpu_dead()
4408 mask = rdp->grpmask; in rcutree_report_cpu_dead()
4417 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ in rcutree_report_cpu_dead()
4418 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_dead()
4419 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_dead()
4420 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ in rcutree_report_cpu_dead()
4421 /* Report quiescent state -before- changing ->qsmaskinitnext! */ in rcutree_report_cpu_dead()
4423 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcutree_report_cpu_dead()
4426 /* Clear from ->qsmaskinitnext to mark offline. */ in rcutree_report_cpu_dead()
4427 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); in rcutree_report_cpu_dead()
4430 rdp->cpu_started = false; in rcutree_report_cpu_dead()
4435 * The outgoing CPU has just passed through the dying-idle state, and we
4451 if (rcu_segcblist_empty(&rdp->cblist)) { in rcutree_migrate_callbacks()
4459 my_rnp = my_rdp->mynode; in rcutree_migrate_callbacks()
4466 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4469 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4470 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); in rcutree_migrate_callbacks()
4483 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4484 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4486 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4487 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()
4499 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); in rcutree_dead_cpu()
4500 // Stop-machine done, so allow nohz_full to disable tick. in rcutree_dead_cpu()
4513 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
4515 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); in rcutree_dying_cpu()
4516 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in rcutree_dying_cpu()
4517 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl")); in rcutree_dying_cpu()
4532 rnp = rdp->mynode; in rcutree_offline_cpu()
4534 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4537 // nohz_full CPUs need the tick for stop-machine to work quickly in rcutree_offline_cpu()
4544 * On non-huge systems, use expedited RCU grace periods to make suspend
4580 …if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n… in rcu_spawn_gp_kthread()
4594 /* This is a pre-SMP initcall, we expect a single CPU */ in rcu_spawn_gp_kthread()
4597 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu() in rcu_spawn_gp_kthread()
4601 rcu_spawn_rnp_kthreads(rdp->mynode); in rcu_spawn_gp_kthread()
4612 * contain synchronous grace-period primitives (during which time, this idle
4613 * task is booting the system, and such primitives are no-ops). After this
4614 * function is called, any synchronous grace-period primitives are run as
4628 // Fix up the ->gp_seq counters. in rcu_scheduler_starting()
4631 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in rcu_scheduler_starting()
4661 /* Initialize the level-tracking arrays. */ in rcu_init_one()
4665 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; in rcu_init_one()
4670 for (i = rcu_num_lvls - 1; i >= 0; i--) { in rcu_init_one()
4677 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
4678 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
4680 rnp->gp_seq = rcu_state.gp_seq; in rcu_init_one()
4681 rnp->gp_seq_needed = rcu_state.gp_seq; in rcu_init_one()
4682 rnp->completedqs = rcu_state.gp_seq; in rcu_init_one()
4683 rnp->qsmask = 0; in rcu_init_one()
4684 rnp->qsmaskinit = 0; in rcu_init_one()
4685 rnp->grplo = j * cpustride; in rcu_init_one()
4686 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
4687 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4688 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4690 rnp->grpnum = 0; in rcu_init_one()
4691 rnp->grpmask = 0; in rcu_init_one()
4692 rnp->parent = NULL; in rcu_init_one()
4694 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
4695 rnp->grpmask = BIT(rnp->grpnum); in rcu_init_one()
4696 rnp->parent = rcu_state.level[i - 1] + in rcu_init_one()
4697 j / levelspread[i - 1]; in rcu_init_one()
4699 rnp->level = i; in rcu_init_one()
4700 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4702 init_waitqueue_head(&rnp->exp_wq[0]); in rcu_init_one()
4703 init_waitqueue_head(&rnp->exp_wq[1]); in rcu_init_one()
4704 init_waitqueue_head(&rnp->exp_wq[2]); in rcu_init_one()
4705 init_waitqueue_head(&rnp->exp_wq[3]); in rcu_init_one()
4706 spin_lock_init(&rnp->exp_lock); in rcu_init_one()
4707 mutex_init(&rnp->kthread_mutex); in rcu_init_one()
4708 raw_spin_lock_init(&rnp->exp_poll_lock); in rcu_init_one()
4709 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; in rcu_init_one()
4710 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp); in rcu_init_one()
4718 while (i > rnp->grphi) in rcu_init_one()
4720 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
4721 per_cpu_ptr(&rcu_data, i)->barrier_head.next = in rcu_init_one()
4722 &per_cpu_ptr(&rcu_data, i)->barrier_head; in rcu_init_one()
4728 * Force priority from the kernel command-line into range.
4752 * the ->node array in the rcu_state structure.
4788 /* If the compile-time values are accurate, just leave. */ in rcu_init_geometry()
4796 * The boot-time rcu_fanout_leaf parameter must be at least two in rcu_init_geometry()
4798 * Complain and fall back to the compile-time values if this in rcu_init_geometry()
4813 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; in rcu_init_geometry()
4817 * If this limit is exceeded, fall back to the compile-time values. in rcu_init_geometry()
4819 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { in rcu_init_geometry()
4832 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; in rcu_init_geometry()
4854 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
4857 level = rnp->level; in rcu_dump_rcu_node_tree()
4859 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()
4882 * We don't need protection against CPU-hotplug here because in rcu_init()
4906 /* -After- the rcu_node ->lock fields are initialized! */ in rcu_init()
4912 // Kick-start in case any polled grace periods started early. in rcu_init()