Lines matching: lock, detect, function, integer, n, enable

1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
14 * For detailed explanation of Read-Copy Update mechanism see -
67 #include "../time/tick-internal.h"
88 return READ_ONCE(rdp->gpwrap_count); in rcu_get_gpwrap_count()
95 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
119 /* Control rcu_node-tree auto-balancing at boot time. */
137 * to detect real grace periods. This variable is also used to suppress
138 * boot-time false positives from lockdep-RCU error checking. Finally, it
173 * real-time priority (enabling/disabling) is controlled by
179 /* Delay in jiffies for grace-period initialization delays, debug only. */
208 * for non-zero delays, the overall slowdown of grace periods is constant
217 * permit this function to be invoked without holding the root rcu_node
218 * structure's ->lock, but of course results can be subject to change.
233 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
234 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
239 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
242 * This is a special-purpose function to be used in the softirq
243 * infrastructure and perhaps the occasional long-running softirq
267 "Illegal rcu_softirq_qs() in RCU read-side critical section"); in rcu_softirq_qs()
277 * to the next non-quiescent value.
279 * The non-atomic test/increment sequence works because the upper bits
280 * of the ->state variable are manipulated only by the corresponding CPU,
300 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
325 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_stopped_since()
329 * Return true if the referenced integer is zero while the specified
340 return false; // Non-zero, so report failure; in rcu_watching_zero_in_eqs()
341 smp_rmb(); // Order *vp read and CT state re-read. in rcu_watching_zero_in_eqs()
354 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
371 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
373 * If the current CPU is idle and running at a first-level (not nested)
420 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
439 * quiescent-state help from rcu_note_context_switch().
447 * Make sure that we give the grace-period kthread time to detect any
466 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); in adjust_jiffies_till_sched_qs()
476 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); in param_set_first_fqs_jiffies()
488 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); in param_set_next_fqs_jiffies()
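The two setters above clamp user-supplied force-quiescent-state delays to at most HZ jiffies (the second also enforces a minimum of one jiffy). A hedged sketch of how such a checked setter is typically wired to a module parameter; the ops-struct name is an assumption, and jiffies_till_first_fqs is assumed to be tree.c's existing ulong variable:

```c
#include <linux/moduleparam.h>

/*
 * Illustrative wiring only; the ops-struct name is an assumption and
 * jiffies_till_first_fqs is assumed to be tree.c's existing ulong.
 */
static const struct kernel_param_ops example_first_fqs_ops = {
	.set = param_set_first_fqs_jiffies,	/* clamp to at most HZ jiffies */
	.get = param_get_ulong,
};
module_param_cb(jiffies_till_first_fqs, &example_first_fqs_ops,
		&jiffies_till_first_fqs, 0644);
```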
541 * Send along grace-period-related data for rcutorture diagnostics.
550 /* Gather grace-period sequence numbers for rcutorture diagnostics. */
559 /* Format grace-period sequence numbers for rcutorture diagnostics. */
572 * An empty function that will trigger a reschedule on
573 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
590 * get re-enabled again.
596 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU)) in rcu_irq_work_resched()
599 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU)) in rcu_irq_work_resched()
612 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
622 "Bad RCU nmi_nesting counter\n"); in rcu_irq_exit_check_preempt()
630 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
641 * in a timely manner, the RCU grace-period kthread sets that CPU's
642 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
643 * exception will invoke this function, which will turn on the scheduler
644 * tick, which will enable RCU to detect that CPU's quiescent states,
645 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
650 * interrupt or exception. In that case, the RCU grace-period kthread
652 * controlled environments, this function allows RCU to get what it
666 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
667 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
668 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
677 // handler and that the rcu_node lock is an irq-disabled lock in __rcu_irq_enter_check_tick()
678 // prevents self-deadlock. So we can safely recheck under the lock. in __rcu_irq_enter_check_tick()
680 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
681 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
684 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
685 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
687 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
693 * Check to see if any future non-offloaded RCU-related work will need
695 * returning 1 if so. This function is part of the RCU implementation;
696 * it is -not- an exported member of the RCU API. This is used by
697 * the idle-entry code to figure out whether it is safe to disable the
698 * scheduler-clock interrupt.
700 * Just check whether or not this CPU has non-offloaded RCU callbacks
705 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && in rcu_needs_cpu()
711 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
716 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
717 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
718 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
719 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
720 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
721 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
726 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
729 * A @true return means that this CPU can safely enter RCU read-side
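As a usage illustration, a hedged sketch of guarding an RCU read-side critical section behind rcu_is_watching(), for paths that might be reached from idle; the data structure and helpers are hypothetical:

```c
#include <linux/rcupdate.h>

struct my_state;					/* hypothetical */
extern struct my_state __rcu *my_state_ptr;		/* hypothetical */
extern void consume_state(struct my_state *s);		/* hypothetical */

static void example_maybe_read(void)
{
	struct my_state *s;

	if (!rcu_is_watching())
		return;	/* RCU is not watching this CPU (e.g. deep idle). */

	rcu_read_lock();
	s = rcu_dereference(my_state_ptr);
	if (s)
		consume_state(s);
	rcu_read_unlock();
}
```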
772 * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value.
775 * @lag_gps = 0 means we reset it back to the boot-time value.
791 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
798 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag, in rcu_gpnum_ovf()
799 rnp->gp_seq)) { in rcu_gpnum_ovf()
800 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
801 WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1); in rcu_gpnum_ovf()
803 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
804 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
825 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_save()
826 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) { in rcu_watching_snap_save()
827 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_save()
828 rcu_gpnum_ovf(rdp->mynode, rdp); in rcu_watching_snap_save()
852 struct rcu_node *rnp = rdp->mynode; in rcu_watching_snap_recheck()
859 * read-side critical section that started before the beginning in rcu_watching_snap_recheck()
862 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) { in rcu_watching_snap_recheck()
863 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_recheck()
872 * the CPU-offline process, or, failing that, by rcu_gp_init() in rcu_watching_snap_recheck()
874 * last task on a leaf rcu_node structure exiting its RCU read-side in rcu_watching_snap_recheck()
879 * The rcu_node structure's ->lock is held here, which excludes in rcu_watching_snap_recheck()
880 * the relevant portions of the CPU-hotplug code, the grace-period in rcu_watching_snap_recheck()
889 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", in rcu_watching_snap_recheck()
890 __func__, rnp->grplo, rnp->grphi, rnp->level, in rcu_watching_snap_recheck()
891 (long)rnp->gp_seq, (long)rnp->completedqs); in rcu_watching_snap_recheck()
892 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in rcu_watching_snap_recheck()
893 …pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n in rcu_watching_snap_recheck()
894 …__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rc… in rcu_watching_snap_recheck()
895 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n", in rcu_watching_snap_recheck()
896 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], in rcu_watching_snap_recheck()
897 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state, in rcu_watching_snap_recheck()
898 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state); in rcu_watching_snap_recheck()
907 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs in rcu_watching_snap_recheck()
914 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && in rcu_watching_snap_recheck()
918 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); in rcu_watching_snap_recheck()
920 smp_store_release(&rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
922 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
926 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! in rcu_watching_snap_recheck()
928 * And some in-kernel loops check need_resched() before calling in rcu_watching_snap_recheck()
930 * running in-kernel with scheduling-clock interrupts disabled. in rcu_watching_snap_recheck()
933 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_watching_snap_recheck()
934 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_watching_snap_recheck()
936 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
937 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
938 ret = -1; in rcu_watching_snap_recheck()
942 * If more than halfway to RCU CPU stall-warning time, invoke in rcu_watching_snap_recheck()
950 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_watching_snap_recheck()
951 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
952 ret = -1; in rcu_watching_snap_recheck()
955 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_watching_snap_recheck()
956 (rnp->ffmask & rdp->grpmask)) { in rcu_watching_snap_recheck()
957 rdp->rcu_iw_pending = true; in rcu_watching_snap_recheck()
958 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_watching_snap_recheck()
959 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_watching_snap_recheck()
962 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { in rcu_watching_snap_recheck()
963 int cpu = rdp->cpu; in rcu_watching_snap_recheck()
969 rsrp = &rdp->snap_record; in rcu_watching_snap_recheck()
970 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu); in rcu_watching_snap_recheck()
971 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu); in rcu_watching_snap_recheck()
972 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); in rcu_watching_snap_recheck()
973 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu); in rcu_watching_snap_recheck()
974 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu); in rcu_watching_snap_recheck()
975 rsrp->nr_csw = nr_context_switches_cpu(cpu); in rcu_watching_snap_recheck()
976 rsrp->jiffies = jiffies; in rcu_watching_snap_recheck()
977 rsrp->gp_seq = rdp->gp_seq; in rcu_watching_snap_recheck()
984 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
988 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in trace_rcu_this_gp()
989 gp_seq_req, rnp->level, in trace_rcu_this_gp()
990 rnp->grplo, rnp->grphi, s); in trace_rcu_this_gp()
994 * rcu_start_this_gp - Request the start of a particular grace period
1001 * rcu_node structure's ->gp_seq_needed field. Returns true if there
1002 * is reason to awaken the grace-period kthread.
1004 * The caller must hold the specified rcu_node structure's ->lock, which
1005 * is why the caller is responsible for waking the grace-period kthread.
1017 * structure's lock or bail out if the need for this grace period in rcu_start_this_gp()
1018 * has already been recorded -- or if that grace period has in in rcu_start_this_gp()
1020 * progress in a non-leaf node, no recording is needed because the in rcu_start_this_gp()
1022 * Note that rnp_start->lock must not be released. in rcu_start_this_gp()
1026 for (rnp = rnp_start; 1; rnp = rnp->parent) { in rcu_start_this_gp()
1029 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || in rcu_start_this_gp()
1030 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || in rcu_start_this_gp()
1032 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { in rcu_start_this_gp()
1037 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); in rcu_start_this_gp()
1038 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { in rcu_start_this_gp()
1049 if (rnp != rnp_start && rnp->parent != NULL) in rcu_start_this_gp()
1051 if (!rnp->parent) in rcu_start_this_gp()
1071 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { in rcu_start_this_gp()
1072 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1073 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1081 * Clean up any old requests for the just-ended grace period. Also return
1089 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); in rcu_future_gp_cleanup()
1091 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ in rcu_future_gp_cleanup()
1092 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1098 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1100 * sleep upon return, resulting in a grace-period hang), and don't bother
1101 * awakening when there is nothing for the grace-period kthread to do
1106 * So why do the self-wakeup when in an interrupt or softirq handler
1107 * in the grace-period kthread's context? Because the kthread might have
1109 * pre-sleep check of the awaken condition. In this case, a wakeup really
1125 * If there is room, assign a ->gp_seq number to any callbacks on this
1127 * that were previously assigned a ->gp_seq number that has since proven
1129 * ->gp_seq number while RCU is idle, but with reference to a non-root
1130 * rcu_node structure. This function is idempotent, so it does not hurt
1132 * the RCU grace-period kthread.
1134 * The caller must hold rnp->lock with interrupts disabled.
1145 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1148 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1151 * Callbacks are often registered with incomplete grace-period in rcu_accelerate_cbs()
1153 * information requires acquiring a global lock... RCU therefore in rcu_accelerate_cbs()
1157 * accelerating callback invocation to an earlier grace-period in rcu_accelerate_cbs()
1161 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1165 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1170 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1177 * rcu_node structure's ->lock be held. It consults the cached value
1178 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1179 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1180 * while holding the leaf rcu_node structure's ->lock.
1190 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1192 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1205 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1206 * sublist. This function is idempotent, so it does not hurt to
1207 * invoke it repeatedly. As long as it is not invoked -too- often...
1208 * Returns true if the RCU grace-period kthread needs to be awakened.
1210 * The caller must hold rnp->lock with interrupts disabled.
1218 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1222 * Find all callbacks whose ->gp_seq numbers indicate that they in rcu_advance_cbs()
1225 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1233 * that the RCU grace-period kthread be awakened.
1239 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) in rcu_advance_cbs_nowake()
1241 // The grace period cannot end while we hold the rcu_node lock. in rcu_advance_cbs_nowake()
1242 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) in rcu_advance_cbs_nowake()
1261 * Update CPU-local rcu_data state to record the beginnings and ends of
1262 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1264 * Returns true if the grace-period kthread needs to be awakened.
1274 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1278 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1279 unlikely(rdp->gpwrap)) { in __note_gp_changes()
1282 rdp->core_needs_qs = false; in __note_gp_changes()
1283 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1287 if (rdp->core_needs_qs) in __note_gp_changes()
1288 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1291 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ in __note_gp_changes()
1292 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1293 unlikely(rdp->gpwrap)) { in __note_gp_changes()
1296 * set up to detect a quiescent state, otherwise don't in __note_gp_changes()
1299 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1300 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1301 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1302 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1305 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1306 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1307 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1308 if (IS_ENABLED(CONFIG_PROVE_RCU) && rdp->gpwrap) in __note_gp_changes()
1309 WRITE_ONCE(rdp->last_sched_clock, jiffies); in __note_gp_changes()
1310 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1322 rnp = rdp->mynode; in note_gp_changes()
1323 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1324 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1338 /* Register a counter to suppress debugging grace-period delays. */
1372 /* Allow rcutorture to stall the grace-period kthread. */
1389 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration); in rcu_gp_torture_wait()
1391 pr_alert("%s: Wait complete\n", __func__); in rcu_gp_torture_wait()
1429 // end of that GP. Either way, zero counter to avoid counter-wrap in rcu_poll_gp_seq_end()
1441 // where caller does not hold the root rcu_node structure's lock.
1458 // caller does not hold the root rcu_node structure's lock.
1495 * in the llist should be used as a wait-tail for this
1508 * +----------+ +--------+ +-------+
1510 * | head |---------> | cb2 |--------->| cb1 |
1512 * +----------+ +--------+ +-------+
1522 * +----------+ +--------+ +--------+ +-------+
1524 * | head ------> wait |------> cb2 |------> | cb1 |
1526 * +----------+ +--------+ +--------+ +-------+
1538 * +----------+ +--------+ +--------+ +-------+
1540 * | head ------> wait |------> cb2 |------> | cb1 |
1542 * +----------+ +--------+ +--------+ +-------+
1552 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1554 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1556 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1567 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1569 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1571 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1578 * to use the rel-acq semantics. If the concurrent kworker
1593 * +----------+ +--------+
1595 * | head ------> wait |
1597 * +----------+ +--------+
1603 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node; in rcu_sr_is_wait_head()
1614 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1)) in rcu_sr_get_wait_head()
1615 return &sr_wn->node; in rcu_sr_get_wait_head()
1625 atomic_set_release(&sr_wn->inuse, 0); in rcu_sr_put_wait_head()
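The get/put pair above claims and returns entries of a small fixed pool by flipping an ->inuse flag with acquire/release ordering. A generic hedged sketch of the same idiom, with illustrative names (POOL_SIZE, struct slot) that are not from tree.c:

```c
#include <linux/atomic.h>

#define POOL_SIZE 5			/* illustrative pool size */

struct slot {
	atomic_t inuse;
	/* ... payload ... */
};

static struct slot pool[POOL_SIZE];

static struct slot *pool_get(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		/* Old value 0 means we won the claim; acquire orders later accesses. */
		if (!atomic_cmpxchg_acquire(&pool[i].inuse, 0, 1))
			return &pool[i];
	}
	return NULL;	/* exhausted; the caller must retry or fall back */
}

static void pool_put(struct slot *s)
{
	/* Release orders our prior accesses before the slot becomes reusable. */
	atomic_set_release(&s->inuse, 0);
}
```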
1639 !poll_state_synchronize_rcu_full(&rs->oldstate), in rcu_sr_normal_complete()
1640 "A full grace period is not passed yet!\n"); in rcu_sr_normal_complete()
1643 complete(&rs->completion); in rcu_sr_normal_complete()
1655 * follow acq-rel semantics. in rcu_sr_normal_gp_cleanup_work()
1666 head = done->next; in rcu_sr_normal_gp_cleanup_work()
1667 done->next = NULL; in rcu_sr_normal_gp_cleanup_work()
1671 * done tail which is acq-read above is not removed in rcu_sr_normal_gp_cleanup_work()
1692 * Helper function for rcu_gp_cleanup().
1710 llist_for_each_safe(rcu, next, wait_tail->next) { in rcu_sr_normal_gp_cleanup()
1716 wait_tail->next = next; in rcu_sr_normal_gp_cleanup()
1724 * wait head if there are no in-flight workers. If there are in-flight workers, in rcu_sr_normal_gp_cleanup()
1729 if (wait_tail->next && wait_tail->next->next == NULL && in rcu_sr_normal_gp_cleanup()
1730 rcu_sr_is_wait_head(wait_tail->next) && in rcu_sr_normal_gp_cleanup()
1732 rcu_sr_put_wait_head(wait_tail->next); in rcu_sr_normal_gp_cleanup()
1733 wait_tail->next = NULL; in rcu_sr_normal_gp_cleanup()
1742 * of outstanding users (if still left) and releasing wait-heads in rcu_sr_normal_gp_cleanup()
1745 if (wait_tail->next) { in rcu_sr_normal_gp_cleanup()
1753 * Helper function for rcu_gp_init().
1772 /* Inject a wait-dummy-node. */ in rcu_sr_normal_gp_init()
1777 * this step, since a GP-kthread, rcu_gp_init() -> gp_cleanup(), in rcu_sr_normal_gp_init()
1789 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next); in rcu_sr_normal_add_req()
1845 * the rcu_sr_normal_gp_init() function was not able to insert a dummy in rcu_gp_init()
1846 * separator to the llist, because there were no dummy-nodes left. in rcu_gp_init()
1848 * The number of dummy-nodes is fixed, so it could be that we have run out of in rcu_gp_init()
1856 * Apply per-leaf buffered online and offline operations to in rcu_gp_init()
1859 * offlining path, when combined with checks in this function, in rcu_gp_init()
1870 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1871 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1879 /* Record old state, apply changes to ->qsmaskinit field. */ in rcu_gp_init()
1880 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1881 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1883 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ in rcu_gp_init()
1884 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1886 if (!rnp->wait_blkd_tasks) /* Ever offline? */ in rcu_gp_init()
1889 rnp->wait_blkd_tasks = true; /* blocked tasks */ in rcu_gp_init()
1896 * If all waited-on tasks from prior grace period are in rcu_gp_init()
1899 * clear ->wait_blkd_tasks. Otherwise, if one of this in rcu_gp_init()
1901 * simply clear ->wait_blkd_tasks. in rcu_gp_init()
1903 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1904 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { in rcu_gp_init()
1905 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1906 if (!rnp->qsmaskinit) in rcu_gp_init()
1917 * Set the quiescent-state-needed bits in all the rcu_node in rcu_gp_init()
1918 * structures for all currently online CPUs in breadth-first in rcu_gp_init()
1934 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1935 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); in rcu_gp_init()
1936 if (rnp == rdp->mynode) in rcu_gp_init()
1939 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, in rcu_gp_init()
1940 rnp->level, rnp->grplo, in rcu_gp_init()
1941 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1942 /* Quiescent states for tasks on any now-offline CPUs. */ in rcu_gp_init()
1943 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
1944 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
1945 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
1946 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_gp_init()
1961 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1972 // Someone like call_rcu() requested a force-quiescent-state scan. in rcu_gp_fqs_check_wake()
1978 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
1985 * Do one round of quiescent-state forcing.
2002 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs); in rcu_gp_fqs()
2006 /* Collect dyntick-idle snapshots. */ in rcu_gp_fqs()
2009 /* Handle dyntick-idle and offline CPUs. */ in rcu_gp_fqs()
2012 /* Clear flag to prevent immediate re-entry. */ in rcu_gp_fqs()
2021 * Loop doing repeated quiescent-state forcing until the grace period ends.
2062 * is required only for single-node rcu_node trees because readers blocking in rcu_gp_fqs_loop()
2064 * For multi-node trees, checking the root node's ->qsmask suffices, because a in rcu_gp_fqs_loop()
2065 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from in rcu_gp_fqs_loop()
2068 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_fqs_loop()
2071 /* If time for quiescent-state forcing, do it. */ in rcu_gp_fqs_loop()
2100 j = rcu_state.jiffies_force_qs - j; in rcu_gp_fqs_loop()
2123 gp_duration = rcu_state.gp_end - rcu_state.gp_start; in rcu_gp_cleanup()
2132 * safe for us to drop the lock in order to mark the grace in rcu_gp_cleanup()
2139 * Propagate new ->gp_seq value to rcu_node structures so that in rcu_gp_cleanup()
2142 * RCU grace-period initialization races by forcing the end of in rcu_gp_cleanup()
2153 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
2154 WRITE_ONCE(rnp->gp_seq, new_gp_seq); in rcu_gp_cleanup()
2155 if (!rnp->parent) in rcu_gp_cleanup()
2158 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2160 /* smp_mb() provided by prior unlock-lock pair. */ in rcu_gp_cleanup()
2164 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { in rcu_gp_cleanup()
2176 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ in rcu_gp_cleanup()
2185 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { in rcu_gp_cleanup()
2186 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2196 // the RCU_GP_FLAG_INIT bit in ->gp_state (which records in rcu_gp_cleanup()
2200 // hold the ->nocb_lock needed to safely access an offloaded in rcu_gp_cleanup()
2201 // ->cblist.  We do not want to acquire that lock because in rcu_gp_cleanup()
2211 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. in rcu_gp_cleanup()
2213 // ->gp_flags bits. in rcu_gp_cleanup()
2235 /* Handle grace-period start. */ in rcu_gp_kthread()
2255 /* Handle quiescent-state forcing. */ in rcu_gp_kthread()
2258 /* Handle grace-period end. */ in rcu_gp_kthread()
2267 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2268 * another grace period is required. Whether we wake the grace-period
2269 * kthread or it awakens itself for the next round of quiescent-state
2270 * forcing, that kthread will clean up after the just-completed grace
2271 * period. Note that the caller must hold rnp->lock, which is released
2275 __releases(rcu_get_root()->lock) in rcu_report_qs_rsp()
2285 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2290 * is the grace-period snapshot, which means that the quiescent states
2291 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2294 * As a special case, if mask is zero, the bit-already-cleared check is
2296 * during grace-period initialization.
2300 __releases(rnp->lock) in rcu_report_qs_rnp()
2309 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
2321 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); in rcu_report_qs_rnp()
2322 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, in rcu_report_qs_rnp()
2323 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2324 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2325 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2326 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2332 rnp->completedqs = rnp->gp_seq; in rcu_report_qs_rnp()
2333 mask = rnp->grpmask; in rcu_report_qs_rnp()
2334 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2336 /* No more levels. Exit loop holding root lock. */ in rcu_report_qs_rnp()
2342 rnp = rnp->parent; in rcu_report_qs_rnp()
2344 oldmask = READ_ONCE(rnp_c->qsmask); in rcu_report_qs_rnp()
2352 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ in rcu_report_qs_rnp()
2358 * RCU grace period. The caller must hold the corresponding rnp->lock with
2359 * irqs disabled, and this lock is released upon return, but irqs remain
2364 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2373 rnp->qsmask != 0) { in rcu_report_unblock_qs_rnp()
2378 rnp->completedqs = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2379 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2389 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ in rcu_report_unblock_qs_rnp()
2390 gps = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2391 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2408 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2409 rnp = rdp->mynode; in rcu_report_qs_rdp()
2411 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2412 rdp->gpwrap) { in rcu_report_qs_rdp()
2420 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2424 mask = rdp->grpmask; in rcu_report_qs_rdp()
2425 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2426 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2445 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_qs_rdp()
2446 /* ^^^ Released rnp->lock */ in rcu_report_qs_rdp()
2459 /* Check for grace-period ends and beginnings. */ in rcu_check_quiescent_state()
2466 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2473 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2483 /* Return true if callback-invocation time limit exceeded. */
2497 * period. Throttle as specified by rdp->blimit.
2514 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2516 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2518 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2530 * completion (materialized by rnp->gp_seq update) thanks to the in rcu_do_batch()
2537 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); in rcu_do_batch()
2539 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; in rcu_do_batch()
2540 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2541 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && in rcu_do_batch()
2552 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2553 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2555 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2557 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
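For concreteness, a hedged worked example of the batch-limit arithmetic a few lines above, assuming the usual defaults of blimit = 10 and rcu_divisor = 7:

```c
/*
 * Worked example (assumed defaults: blimit = 10, rcu_divisor = 7):
 *
 *   pending = 10000 ready callbacks
 *   div     = 7                      (already in range, so unchanged)
 *   bl      = max(10, 10000 >> 7)
 *           = max(10, 78) = 78 callbacks per batch
 *
 * With only pending = 500, 500 >> 7 = 3, so bl stays at blimit = 10.
 */
```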
2573 f = rhp->func; in rcu_do_batch()
2575 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); in rcu_do_batch()
2600 // But rcuc kthreads can delay quiescent-state in rcu_do_batch()
2602 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && in rcu_do_batch()
2604 rdp->rcu_cpu_has_work = 1; in rcu_do_batch()
2611 rdp->n_cbs_invoked += count; in rcu_do_batch()
2616 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2617 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2620 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2621 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2622 rdp->blimit = blimit; in rcu_do_batch()
2624 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ in rcu_do_batch()
2625 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2626 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2627 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2628 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2629 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2635 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2639 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2640 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2648 * This function is invoked from each scheduling-clock interrupt,
2649 * and checks to see if this CPU is in a non-context-switch quiescent
2664 trace_rcu_utilization(TPS("Start scheduler-tick")); in rcu_sched_clock_irq()
2667 /* The load-acquire pairs with the store-release setting to true. */ in rcu_sched_clock_irq()
2683 trace_rcu_utilization(TPS("End scheduler-tick")); in rcu_sched_clock_irq()
2690 * Otherwise, invoke the specified function to check dyntick state for
2707 rcu_state.cbovldnext |= !!rnp->cbovldmask; in force_qs_rnp()
2708 if (rnp->qsmask == 0) { in force_qs_rnp()
2713 * priority-boost blocked readers. in force_qs_rnp()
2716 /* rcu_initiate_boost() releases rnp->lock */ in force_qs_rnp()
2722 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { in force_qs_rnp()
2729 mask |= rdp->grpmask; in force_qs_rnp()
2733 rsmask |= rdp->grpmask; in force_qs_rnp()
2736 /* Idle/offline CPUs, report (releases rnp->lock). */ in force_qs_rnp()
2737 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in force_qs_rnp()
2739 /* Nothing to do here, so just drop the lock. */ in force_qs_rnp()
2749 * Force quiescent states on reluctant CPUs, and also detect which
2750 * CPUs are in dyntick-idle mode.
2763 for (; rnp != NULL; rnp = rnp->parent) { in rcu_force_quiescent_state()
2765 !raw_spin_trylock(&rnp->fqslock); in rcu_force_quiescent_state()
2767 raw_spin_unlock(&rnp_old->fqslock); in rcu_force_quiescent_state()
2774 /* Reached the root of the rcu_node tree, acquire lock. */ in rcu_force_quiescent_state()
2776 raw_spin_unlock(&rnp_old->fqslock); in rcu_force_quiescent_state()
2800 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2805 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2820 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) { in rcu_core()
2822 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2830 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2833 /* Re-invoke RCU core processing if there are callbacks remaining. */ in rcu_core()
2834 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_core()
2844 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2899 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2946 * Spawn per-CPU RCU core processing kthreads.
2957 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); in rcu_spawn_core_kthreads()
2963 rcu_segcblist_enqueue(&rdp->cblist, head); in rcutree_enqueue()
2965 rcu_segcblist_n_cbs(&rdp->cblist)); in rcutree_enqueue()
2966 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in rcutree_enqueue()
2970 * Handle any core-RCU processing required by a call_rcu() invocation.
2978 * core in order to force a re-evaluation of RCU's idleness. in call_rcu_core()
2994 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in call_rcu_core()
2995 rdp->qlen_last_fqs_check + qhimark)) { in call_rcu_core()
3002 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in call_rcu_core()
3005 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in call_rcu_core()
3006 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in call_rcu_core()
3007 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in call_rcu_core()
3009 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in call_rcu_core()
3010 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in call_rcu_core()
3016 * RCU callback function to leak a callback.
3024 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3026 * structure's ->lock.
3033 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
3034 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
3036 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
3041 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3045 * Note that this function ignores the possibility that there are a lot
3047 * grace periods. This omission is due to the need for no-CBs CPUs to
3048 * be holding ->nocb_lock to do this check, which is too heavy for a
3049 * common-case operation.
3053 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
3056 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
3057 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
3073 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); in __call_rcu_common()
3086 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); in __call_rcu_common()
3089 WRITE_ONCE(head->func, rcu_leak_callback); in __call_rcu_common()
3092 head->func = func; in __call_rcu_common()
3093 head->next = NULL; in __call_rcu_common()
3103 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu_common()
3109 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu_common()
3110 rcu_segcblist_init(&rdp->cblist); in __call_rcu_common()
3127 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
3128 * flush all lazy callbacks (including the new one) to the main ->cblist while
3132 * @func: actual callback function to be invoked after the grace period
3134 * The callback function will be invoked some time after a full grace
3135 * period elapses, in other words after all pre-existing RCU read-side
3141 * This function will cause callbacks to be invoked sooner than later at the
3142 * expense of extra power. Other than that, this function is identical to, and
3156 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3158 * ->cblist to prevent starting of grace periods too soon.
3162 * @func: actual callback function to be invoked after the grace period
3164 * The callback function will be invoked some time after a full grace
3165 * period elapses, in other words after all pre-existing RCU read-side
3166 * critical sections have completed. However, the callback function
3167 * might well execute concurrently with RCU read-side critical sections
3171 * a different callback function, from within its callback function.
3172 * The specified function will be invoked after another full grace period
3176 * RCU read-side critical sections are delimited by rcu_read_lock()
3179 * or softirqs have been disabled also serve as RCU read-side critical
3184 * all pre-existing RCU read-side critical sections. On systems with more
3187 * last RCU read-side critical section whose beginning preceded the call
3188 * to call_rcu(). It also means that each CPU executing an RCU read-side
3191 * of that RCU read-side critical section. Note that these guarantees
3196 * resulting RCU callback function "func()", then both CPU A and CPU B are
3198 * between the call to call_rcu() and the invocation of "func()" -- even
3202 * Implementation of these memory-ordering guarantees is described here:
3203 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3208 * callback. This delay can significantly improve energy-efficiency
3209 * on low-utilization battery-powered devices. To avoid this delay,
3210 * in latency-sensitive kernel code, use call_rcu_hurry().
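Putting the documentation above into the classic update-side pattern: unlink the element, then pass its rcu_head to call_rcu() with a reclaim callback that frees it once all pre-existing readers have finished. A hedged sketch; struct foo and its users are hypothetical, not from tree.c:

```c
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct foo {				/* hypothetical example structure */
	struct list_head list;
	int data;
	struct rcu_head rh;
};

static void foo_reclaim(struct rcu_head *rh)
{
	struct foo *fp = container_of(rh, struct foo, rh);

	kfree(fp);			/* no pre-existing readers remain */
}

/* Caller holds whatever lock serializes updates to the list. */
static void foo_del(struct foo *fp)
{
	list_del_rcu(&fp->list);	/* readers may still hold references */
	call_rcu(&fp->rh, foo_reclaim);	/* reclaim after a full grace period */
}
```

call_rcu_hurry(), documented just above, takes the same arguments but bypasses the laziness delay when prompt invocation matters more than energy efficiency.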
3219 * During early boot, any blocking grace-period wait automatically
3226 * grace-period optimization is ignored once the scheduler is running.
3238 * Helper function for the synchronize_rcu() API.
3275 * synchronize_rcu - wait until a grace period has elapsed.
3279 * read-side critical sections have completed. Note, however, that
3281 * concurrently with new RCU read-side critical sections that began while
3284 * RCU read-side critical sections are delimited by rcu_read_lock()
3287 * or softirqs have been disabled also serve as RCU read-side critical
3291 * Note that this guarantee implies further memory-ordering guarantees.
3294 * the end of its last RCU read-side critical section whose beginning
3296 * an RCU read-side critical section that extends beyond the return from
3299 * that RCU read-side critical section. Note that these guarantees include
3306 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3309 * Implementation of these memory-ordering guarantees is described here:
3310 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
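The blocking counterpart described above is typically used in an "unlink, wait, free" sequence. A hedged sketch reusing the hypothetical struct foo from the call_rcu() example earlier; the lock is likewise an assumption:

```c
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Sketch only; struct foo is the hypothetical structure shown earlier. */
static void foo_remove_sync(struct foo *fp, spinlock_t *lock)
{
	spin_lock(lock);
	list_del_rcu(&fp->list);	/* unlink from the RCU-protected list */
	spin_unlock(lock);

	synchronize_rcu();		/* wait out all pre-existing readers */
	kfree(fp);			/* nobody can still hold a reference */
}
```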
3320 "Illegal synchronize_rcu() in RCU read-side critical section"); in synchronize_rcu()
3334 // reuse of ->gp_seq_polled_snap. in synchronize_rcu()
3338 // Update the normal grace-period counters to record in synchronize_rcu()
3345 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) in synchronize_rcu()
3346 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in synchronize_rcu()
3352 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3361 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED; in get_completed_synchronize_rcu_full()
3362 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED; in get_completed_synchronize_rcu_full()
3367 * get_state_synchronize_rcu - Snapshot current RCU state
3376 * Any prior manipulation of RCU-protected data must happen in get_state_synchronize_rcu()
3377 * before the load from ->gp_seq. in get_state_synchronize_rcu()
3385 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3386 * @rgosp: location to place combined normal/expedited grace-period state
3388 * Places the normal and expedited grace-period states in @rgosp. This
3403 * Any prior manipulation of RCU-protected data must happen in get_state_synchronize_rcu_full()
3404 * before the loads from ->gp_seq and ->expedited_sequence. in get_state_synchronize_rcu_full()
3408 // Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use in get_state_synchronize_rcu_full()
3410 // the latter here would result in too-short grace periods due to in get_state_synchronize_rcu_full()
3412 rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq); in get_state_synchronize_rcu_full()
3413 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); in get_state_synchronize_rcu_full()
3418 * Helper function for start_poll_synchronize_rcu() and
3430 rnp = rdp->mynode; in start_poll_synchronize_rcu_common()
3437 // from which they are updated at grace-period start, as required. in start_poll_synchronize_rcu_common()
3445 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3463 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3466 * Places the normal and expedited grace-period states in *@rgosp. This
3482 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3488 * function later on until it does return @true. Alternatively, the caller
3494 * Yes, this function does not take counter wrap into account.
3496 * more than a billion grace periods (and way more on a 64-bit system!).
3498 * (many hours even on 32-bit systems) should check them occasionally and
3501 * to get a guaranteed-completed grace-period state.
3503 * In addition, because oldstate compresses the grace-period state for
3509 * This function provides the same memory-ordering guarantees that
3511 * to the function that provided @oldstate, and that returned at the end
3512 * of this function.
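The polled API documented above lets an updater avoid blocking entirely: take a cookie, keep doing other work, and retire the object only once the cookie reports a completed grace period. A hedged sketch; the deferred-free record and its callers are hypothetical:

```c
#include <linux/rcupdate.h>

struct lazy_free {			/* hypothetical bookkeeping record */
	unsigned long gp_cookie;
	void *obj;
};

static void lazy_free_start(struct lazy_free *lf, void *obj)
{
	lf->obj = obj;
	/* Snapshot the GP state and start a grace period if needed. */
	lf->gp_cookie = start_poll_synchronize_rcu();
}

/* Polled later (e.g. from a timer); true once lf->obj may be freed. */
static bool lazy_free_done(struct lazy_free *lf)
{
	return poll_state_synchronize_rcu(lf->gp_cookie);
}
```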
3526 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3532 * function later on until it does return @true. Alternatively, the caller
3536 * Yes, this function does not take counter wrap into account.
3538 * for more than a billion grace periods (and way more on a 64-bit
3540 * long time periods (many hours even on 32-bit systems) should check
3543 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3544 * grace-period state.
3546 * This function provides the same memory-ordering guarantees that would
3548 * the function that provided @rgosp, and that returned at the end of this
3549 * function. And this guarantee requires that the root rcu_node structure's
3550 * ->gp_seq field be checked instead of that of the rcu_state structure.
3551 * The problem is that the just-ending grace-period's callbacks can be
3552 * invoked between the time that the root rcu_node structure's ->gp_seq
3553 * field is updated and the time that the rcu_state structure's ->gp_seq
3562 smp_mb(); // Order against root rcu_node structure grace-period cleanup. in poll_state_synchronize_rcu_full()
3563 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED || in poll_state_synchronize_rcu_full()
3564 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) || in poll_state_synchronize_rcu_full()
3565 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED || in poll_state_synchronize_rcu_full()
3566 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { in poll_state_synchronize_rcu_full()
3575 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3582 * Yes, this function does not take counter wrap into account.
3584 * more than 2 billion grace periods (and way more on a 64-bit system!),
3587 * This function provides the same memory-ordering guarantees that
3589 * to the function that provided @oldstate and that returned at the end
3590 * of this function.
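cond_synchronize_rcu() pairs with get_state_synchronize_rcu(): if enough grace periods elapsed while the caller was busy elsewhere, the wait costs nothing, otherwise it falls back to a full synchronize_rcu(). A hedged sketch; do_expensive_prep() is a hypothetical placeholder:

```c
#include <linux/rcupdate.h>

extern void do_expensive_prep(void);	/* hypothetical long-running work */

static void example_cond_wait(void)
{
	unsigned long cookie = get_state_synchronize_rcu();

	do_expensive_prep();	/* grace periods may complete meanwhile */

	/* Blocks only if the snapshotted grace period has not yet ended. */
	cond_synchronize_rcu(cookie);
}
```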
3600 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3609 * Yes, this function does not take counter wrap into account.
3611 * more than 2 billion grace periods (and way more on a 64-bit system!),
3614 * This function provides the same memory-ordering guarantees that
3616 * to the function that provided @rgosp and that returned at the end of
3617 * this function.
3627 * Check to see if there is any immediate RCU-related work to be done by
3630 * CPU-local state are performed first. However, we must check for CPU
3637 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3658 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3663 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3667 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3669 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3673 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3674 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3682 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3692 * RCU callback function for rcu_barrier(). If we are last, wake
3705 rhp->next = rhp; // Mark the callback as having been invoked. in rcu_barrier_callback()
3707 rcu_barrier_trace(TPS("LastCB"), -1, s); in rcu_barrier_callback()
3710 rcu_barrier_trace(TPS("CB"), -1, s); in rcu_barrier_callback()
3715 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3720 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); in rcu_barrier_entrain()
3727 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3728 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_entrain()
3729 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_entrain()
3736 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3738 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
3739 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_entrain()
3742 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_entrain()
3743 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3748 smp_store_release(&rdp->barrier_seq_snap, gseq); in rcu_barrier_entrain()
3752 * Called with preemption disabled, and from cross-cpu IRQ context.
3760 WARN_ON_ONCE(cpu != rdp->cpu); in rcu_barrier_handler()
3768 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3783 rcu_barrier_trace(TPS("Begin"), -1, s); in rcu_barrier()
3790 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3800 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3804 * to avoid a too-soon return to zero in case of an immediate in rcu_barrier()
3805 * invocation of the just-enqueued callback (or preemption of in rcu_barrier()
3806 * this task). Exclude CPU-hotplug operations to ensure that no in rcu_barrier()
3807 * offline non-offloaded CPU has callbacks queued. in rcu_barrier()
3821 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) in rcu_barrier()
3824 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { in rcu_barrier()
3825 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
3832 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
3842 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
3857 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3863 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
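Because rcu_barrier() waits only for callbacks that are already queued, its canonical use is teardown: stop posting new callbacks, call rcu_barrier(), then free whatever those callbacks relied on. A hedged module-exit sketch; the helper functions are hypothetical:

```c
#include <linux/module.h>
#include <linux/rcupdate.h>

extern void stop_queueing_new_callbacks(void);	/* hypothetical */
extern void destroy_callback_resources(void);	/* hypothetical */

static void __exit example_exit(void)
{
	stop_queueing_new_callbacks();	/* no further call_rcu() invocations */
	rcu_barrier();			/* wait for every queued callback */
	destroy_callback_resources();	/* now safe to tear down */
}
module_exit(example_exit);
```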
3874 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3879 * rcu_barrier() system-wide from use of this function, which means that
3889 * given .set() function, but should concurrent .set() invocation ever be
3922 return -EAGAIN; in param_set_do_rcu_barrier()
3925 atomic_inc((atomic_t *)kp->arg); in param_set_do_rcu_barrier()
3927 atomic_dec((atomic_t *)kp->arg); in param_set_do_rcu_barrier()
3937 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg)); in param_get_do_rcu_barrier()
3949 * This will not be stable unless the rcu_node structure's ->lock is
3955 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
3961 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3965 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); in rcu_rdp_cpu_online()
4001 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask in rcu_lockdep_current_cpu_online()
4024 * and all tasks that were preempted within an RCU read-side critical
4026 * read-side critical section. Some other CPU is reporting this fact with
4027 * the specified rcu_node structure's ->lock held and interrupts disabled.
4028 * This function therefore goes up the tree of rcu_node structures,
4029 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
4030 * the leaf rcu_node structure's ->qsmaskinit field has already been
4033 * This function does check that the specified rcu_node structure has
4036 * a needless lock acquisition. So once it has done its work, don't
4046 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || in rcu_cleanup_dead_rnp()
4050 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
4051 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
4055 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
4057 WARN_ON_ONCE(rnp->qsmask); in rcu_cleanup_dead_rnp()
4058 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
4068 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4070 * must hold the corresponding leaf rcu_node ->lock with interrupts
4080 WARN_ON_ONCE(rnp->wait_blkd_tasks); in rcu_init_new_rnp()
4082 mask = rnp->grpmask; in rcu_init_new_rnp()
4083 rnp = rnp->parent; in rcu_init_new_rnp()
4087 oldmask = rnp->qsmaskinit; in rcu_init_new_rnp()
4088 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
4096 * Do boot-time initialization of a CPU's per-CPU RCU data.
4105 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4106 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4107 WARN_ON_ONCE(ct->nesting != 1); in rcu_boot_init_percpu_data()
4109 rdp->barrier_seq_snap = rcu_state.barrier_sequence; in rcu_boot_init_percpu_data()
4110 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4111 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4112 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4113 rdp->rcu_onl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4114 rdp->last_sched_clock = jiffies; in rcu_boot_init_percpu_data()
4115 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4142 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_exp_par_gp_kworker()
4144 if (rnp->exp_kworker) in rcu_spawn_exp_par_gp_kworker()
4149 pr_err("Failed to create par gp kworker on %d/%d\n", in rcu_spawn_exp_par_gp_kworker()
4150 rnp->grplo, rnp->grphi); in rcu_spawn_exp_par_gp_kworker()
4153 WRITE_ONCE(rnp->exp_kworker, kworker); in rcu_spawn_exp_par_gp_kworker()
4156 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); in rcu_spawn_exp_par_gp_kworker()
4158 rcu_thread_affine_rnp(kworker->task, rnp); in rcu_spawn_exp_par_gp_kworker()
4159 wake_up_process(kworker->task); in rcu_spawn_exp_par_gp_kworker()
4169 pr_err("Failed to create %s!\n", name); in rcu_start_exp_gp_kworker()
4175 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); in rcu_start_exp_gp_kworker()
4181 mutex_lock(&rnp->kthread_mutex); in rcu_spawn_rnp_kthreads()
4184 mutex_unlock(&rnp->kthread_mutex); in rcu_spawn_rnp_kthreads()
4189 * Invoked early in the CPU-online process, when pretty much all services
4192 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4194 * accept some slop in the rsp->gp_seq access due to the fact that this
4195 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4207 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4208 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4209 rdp->blimit = blimit; in rcutree_prepare_cpu()
4210 ct->nesting = 1; /* CPU not up, no tearing. */ in rcutree_prepare_cpu()
4214 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be in rcutree_prepare_cpu()
4215 * (re-)initialized. in rcutree_prepare_cpu()
4217 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4218 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4221 * Add CPU to leaf rcu_node pending-online bitmask. Any needed in rcutree_prepare_cpu()
4225 rnp = rdp->mynode; in rcutree_prepare_cpu()
4227 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4228 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4229 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4230 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4231 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4232 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
4233 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
4234 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4251 return smp_load_acquire(&rdp->beenonline); in rcu_cpu_beenfullyonline()
4255 * Near the end of the CPU-online process. Pretty much all services
4265 rnp = rdp->mynode; in rcutree_online_cpu()
4267 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4273 // Stop-machine done, so allow nohz_full to disable tick. in rcutree_online_cpu()
4281 * incoming CPUs are not allowed to use RCU read-side critical sections
4282 * until this function is called. Failing to observe this restriction
4285 * Note that this function is special in that it is invoked directly
4287 * This is because this function must be invoked at a precise location.
4301 if (rdp->cpu_started) in rcutree_report_cpu_starting()
4303 rdp->cpu_started = true; in rcutree_report_cpu_starting()
4305 rnp = rdp->mynode; in rcutree_report_cpu_starting()
4306 mask = rdp->grpmask; in rcutree_report_cpu_starting()
4311 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); in rcutree_report_cpu_starting()
4313 newcpu = !(rnp->expmaskinitnext & mask); in rcutree_report_cpu_starting()
4314 rnp->expmaskinitnext |= mask; in rcutree_report_cpu_starting()
4318 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcutree_report_cpu_starting()
4319 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_starting()
4320 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_starting()
4323 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ in rcutree_report_cpu_starting()
4329 /* Report QS -after- changing ->qsmaskinitnext! */ in rcutree_report_cpu_starting()
4330 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcutree_report_cpu_starting()
4335 smp_store_release(&rdp->beenonline, true); in rcutree_report_cpu_starting()
4336 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ in rcutree_report_cpu_starting()
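/*
 * A minimal sketch of the publish/consume pattern used for ->beenonline
 * above: the smp_store_release() pairs with the smp_load_acquire() in
 * rcu_cpu_beenfullyonline(), so everything initialized before the store is
 * visible to whoever observes the flag.  The fields below are hypothetical,
 * not RCU state.
 */
#include <linux/types.h>
#include <asm/barrier.h>

static int payload;
static bool published;

static void producer(void)
{
        payload = 42;                           /* Initialize first... */
        smp_store_release(&published, true);    /* ...then publish the flag. */
}

static int consumer(void)
{
        if (smp_load_acquire(&published))       /* Pairs with the release above. */
                return payload;                 /* Payload guaranteed visible. */
        return -1;                              /* Not yet published. */
}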
4340 * The outgoing function has no further need of RCU, so remove it from
4341 * the rcu_node tree's ->qsmaskinitnext bit masks.
4343 * Note that this function is special in that it is invoked directly
4345 * This is because this function must be invoked at a precise location.
4354 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_report_cpu_dead()
4358	 * may introduce a new RCU read-side critical section while it is actually off the QS masks. in rcutree_report_cpu_dead()
4367 mask = rdp->grpmask; in rcutree_report_cpu_dead()
4369 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ in rcutree_report_cpu_dead()
4370 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_dead()
4371 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_dead()
4372 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ in rcutree_report_cpu_dead()
4373 /* Report quiescent state -before- changing ->qsmaskinitnext! */ in rcutree_report_cpu_dead()
4375 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcutree_report_cpu_dead()
4378 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); in rcutree_report_cpu_dead()
4381 rdp->cpu_started = false; in rcutree_report_cpu_dead()
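/*
 * A minimal, hypothetical example of the kind of RCU read-side critical
 * section the comment above warns about (my_cfg and gp are illustrative,
 * not RCU-internal state):
 */
#include <linux/rcupdate.h>

struct my_cfg {
        int threshold;
};
static struct my_cfg __rcu *gp;

static int read_threshold(void)
{
        struct my_cfg *p;
        int val = -1;

        rcu_read_lock();                /* Begin read-side critical section. */
        p = rcu_dereference(gp);        /* Pointer remains valid until unlock. */
        if (p)
                val = p->threshold;
        rcu_read_unlock();              /* End critical section. */
        return val;
}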
4386 * The outgoing CPU has just passed through the dying-idle state, and we
4402 if (rcu_segcblist_empty(&rdp->cblist)) { in rcutree_migrate_callbacks()
4410 my_rnp = my_rdp->mynode; in rcutree_migrate_callbacks()
4417 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4420 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4421 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); in rcutree_migrate_callbacks()
4434 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4435 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4436 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", in rcutree_migrate_callbacks()
4437 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4438 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()
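/*
 * The callbacks being merged above were queued with call_rcu() or similar.
 * A hypothetical example of queueing such a callback (struct my_node and
 * remove_node() are illustrative only):
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_node {
        int key;
        struct rcu_head rh;             /* Embedded for call_rcu(). */
};

static void my_node_free_cb(struct rcu_head *rh)
{
        /* Invoked after a grace period, possibly on a different CPU. */
        kfree(container_of(rh, struct my_node, rh));
}

static void remove_node(struct my_node *node)
{
        /* ...after unlinking @node from its RCU-protected structure... */
        call_rcu(&node->rh, my_node_free_cb);   /* Deferred free. */
}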
4450 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); in rcutree_dead_cpu()
4451 // Stop-machine done, so allow nohz_full to disable tick. in rcutree_dead_cpu()
4464 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
4466 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); in rcutree_dying_cpu()
4467 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in rcutree_dying_cpu()
4468 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl")); in rcutree_dying_cpu()
4483 rnp = rdp->mynode; in rcutree_offline_cpu()
4485 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4488 // nohz_full CPUs need the tick for stop-machine to work quickly in rcutree_offline_cpu()
4495 * On non-huge systems, use expedited RCU grace periods to make suspend
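/*
 * A simplified sketch of the idea behind the (elided) PM notifier referenced
 * above: expedite grace periods around suspend/hibernation, then restore
 * normal behavior on resume.  Illustration only; the in-tree version also
 * takes the system size into account, per the comment above.
 */
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rcupdate.h>

static int my_rcu_pm_notify(struct notifier_block *self,
                            unsigned long action, void *data)
{
        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                rcu_expedite_gp();      /* Speed up grace periods for suspend. */
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                rcu_unexpedite_gp();    /* Back to normal after resume. */
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_rcu_pm_nb = {
        .notifier_call = my_rcu_pm_notify,
};

/* Registration would be: register_pm_notifier(&my_rcu_pm_nb); */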
4531 …if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n in rcu_spawn_gp_kthread()
4545	/* This is a pre-SMP initcall, so we expect a single CPU. */ in rcu_spawn_gp_kthread()
4548 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu() in rcu_spawn_gp_kthread()
4552 rcu_spawn_rnp_kthreads(rdp->mynode); in rcu_spawn_gp_kthread()
4561 * This function is invoked towards the end of the scheduler's
4563 * contain synchronous grace-period primitives (during which time, this idle
4564 * task is booting the system, and such primitives are no-ops). After this
4565 * function is called, any synchronous grace-period primitives are run as
4579 // Fix up the ->gp_seq counters. in rcu_scheduler_starting()
4582 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in rcu_scheduler_starting()
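/*
 * "Synchronous grace-period primitives" above means calls such as
 * synchronize_rcu().  A hypothetical update-side example, reusing the
 * illustrative my_cfg/gp names from the read-side sketch earlier:
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(gp_lock);

static void update_threshold(int new_val)
{
        struct my_cfg *newp, *oldp;

        newp = kmalloc(sizeof(*newp), GFP_KERNEL);
        if (!newp)
                return;
        newp->threshold = new_val;

        spin_lock(&gp_lock);
        oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
        rcu_assign_pointer(gp, newp);   /* Publish the new version. */
        spin_unlock(&gp_lock);

        synchronize_rcu();              /* Wait for pre-existing readers. */
        kfree(oldp);                    /* Old version can no longer be seen. */
}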
4591 * Helper function for rcu_init() that initializes the rcu_state structure.
4612 /* Initialize the level-tracking arrays. */ in rcu_init_one()
4616 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; in rcu_init_one()
4621 for (i = rcu_num_lvls - 1; i >= 0; i--) { in rcu_init_one()
4625 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); in rcu_init_one()
4626 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), in rcu_init_one()
4628 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
4629 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
4631 rnp->gp_seq = rcu_state.gp_seq; in rcu_init_one()
4632 rnp->gp_seq_needed = rcu_state.gp_seq; in rcu_init_one()
4633 rnp->completedqs = rcu_state.gp_seq; in rcu_init_one()
4634 rnp->qsmask = 0; in rcu_init_one()
4635 rnp->qsmaskinit = 0; in rcu_init_one()
4636 rnp->grplo = j * cpustride; in rcu_init_one()
4637 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
4638 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4639 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4641 rnp->grpnum = 0; in rcu_init_one()
4642 rnp->grpmask = 0; in rcu_init_one()
4643 rnp->parent = NULL; in rcu_init_one()
4645 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
4646 rnp->grpmask = BIT(rnp->grpnum); in rcu_init_one()
4647 rnp->parent = rcu_state.level[i - 1] + in rcu_init_one()
4648 j / levelspread[i - 1]; in rcu_init_one()
4650 rnp->level = i; in rcu_init_one()
4651 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4653 init_waitqueue_head(&rnp->exp_wq[0]); in rcu_init_one()
4654 init_waitqueue_head(&rnp->exp_wq[1]); in rcu_init_one()
4655 init_waitqueue_head(&rnp->exp_wq[2]); in rcu_init_one()
4656 init_waitqueue_head(&rnp->exp_wq[3]); in rcu_init_one()
4657 spin_lock_init(&rnp->exp_lock); in rcu_init_one()
4658 mutex_init(&rnp->kthread_mutex); in rcu_init_one()
4659 raw_spin_lock_init(&rnp->exp_poll_lock); in rcu_init_one()
4660 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; in rcu_init_one()
4661 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp); in rcu_init_one()
4669 while (i > rnp->grphi) in rcu_init_one()
4671 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
4672 per_cpu_ptr(&rcu_data, i)->barrier_head.next = in rcu_init_one()
4673 &per_cpu_ptr(&rcu_data, i)->barrier_head; in rcu_init_one()
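/*
 * The INIT_WORK() above attaches a handler (sync_rcu_do_polled_gp) that is
 * queued later, when polled grace-period work is needed.  A generic,
 * hypothetical sketch of the same INIT_WORK()/queue_work() pattern:
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_ctx {
        struct work_struct work;
        int payload;
};

static void my_work_handler(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);

        /* Runs later, in process context, on a workqueue worker. */
        (void)ctx->payload;
}

static void my_ctx_init(struct my_ctx *ctx)
{
        INIT_WORK(&ctx->work, my_work_handler); /* Bind handler to work item. */
}

static void my_ctx_kick(struct my_ctx *ctx)
{
        queue_work(system_wq, &ctx->work);      /* Defer to the system workqueue. */
}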
4679 * Force priority from the kernel command-line into range.
4696 pr_alert("%s: Limited prio to %d from %d\n", in sanitize_kthread_prio()
4703 * the ->node array in the rcu_state structure.
4729 * value, which is a function of HZ, then adding one for each in rcu_init_geometry()
4739 /* If the compile-time values are accurate, just leave. */ in rcu_init_geometry()
4743 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n", in rcu_init_geometry()
4747 * The boot-time rcu_fanout_leaf parameter must be at least two in rcu_init_geometry()
4749 * Complain and fall back to the compile-time values if this in rcu_init_geometry()
4764 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; in rcu_init_geometry()
4768 * If this limit is exceeded, fall back to the compile-time values. in rcu_init_geometry()
4770 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { in rcu_init_geometry()
4783 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; in rcu_init_geometry()
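/*
 * A hedged model of the capacity calculation above: with fanout_leaf CPUs
 * per leaf node and fanout children per inner node, an i-level tree covers
 * fanout_leaf * fanout^(i-1) CPUs.  Illustration only, not the in-tree code:
 */
static int levels_needed(unsigned long nr_cpus, unsigned long fanout_leaf,
                         unsigned long fanout, int max_levels)
{
        unsigned long capacity = fanout_leaf;   /* One-level tree capacity. */
        int levels = 1;

        while (capacity < nr_cpus && levels < max_levels) {
                capacity *= fanout;             /* Each extra level multiplies. */
                levels++;
        }
        return capacity >= nr_cpus ? levels : -1;       /* -1: does not fit. */
}

/*
 * Example with the common defaults of 16 CPUs per leaf and a fanout of 64:
 * one level covers 16 CPUs, two levels cover 1024, and three levels cover
 * 65536, so a 4096-CPU system needs a three-level tree.
 */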
4802 pr_info("rcu_node tree layout dump\n"); in rcu_dump_rcu_node_tree()
4805 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
4806 pr_cont("\n"); in rcu_dump_rcu_node_tree()
4808 level = rnp->level; in rcu_dump_rcu_node_tree()
4810 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()
4812 pr_cont("\n"); in rcu_dump_rcu_node_tree()
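/*
 * Given the format strings above, the dump for a hypothetical two-level tree
 * covering 32 CPUs with 16 CPUs per leaf would look roughly like this
 * (grplo:grphi ^grpnum, one row per level):
 *
 *   rcu_node tree layout dump
 *    0:31 ^0
 *    0:15 ^0 16:31 ^1
 */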
4833 * We don't need protection against CPU-hotplug here because in rcu_init()
4851 /* -After- the rcu_node ->lock fields are initialized! */ in rcu_init()
4857 // Kick-start in case any polled grace periods started early. in rcu_init()