Lines Matching +full:idle +full:- +full:wait +full:- +full:delay

1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
14 * For detailed explanation of Read-Copy Update mechanism see -
44 #include <linux/wait.h>
48 #include <linux/delay.h>
67 #include "../time/tick-internal.h"
86 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
110 /* Control rcu_node-tree auto-balancing at boot time. */
129 * boot-time false positives from lockdep-RCU error checking. Finally, it
142 * currently delay invocation of any RCU callbacks until after this point.
164 * real-time priority (enabling/disabling) is controlled by
170 /* Delay in jiffies for grace-period initialization delays, debug only. */
182 // Add delay to rcu_read_unlock() for strict grace periods.
197 * the delay. The longer the delay, the more the grace periods between
198 * each delay. The reason for this normalization is that it means that,
199 * for non-zero delays, the overall slowdown of grace periods is constant
200 * regardless of the duration of the delay. This arrangement balances
209 * structure's ->lock, but of course results can be subject to change.
224 if (rcu_segcblist_is_enabled(&rdp->cblist))
225 return rcu_segcblist_n_cbs(&rdp->cblist);
230 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
233 * This is a special-purpose function to be used in the softirq
234 * infrastructure and perhaps the occasional long-running softirq
248 * call to do_something() would be guaranteed to wait only until
250 * that same synchronize_rcu() would instead be guaranteed to wait
258 "Illegal rcu_softirq_qs() in RCU read-side critical section");
268 * to the next non-quiescent value.
270 * The non-atomic test/increment sequence works because the upper bits
271 * of the ->state variable are manipulated only by the corresponding CPU,
291 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
307 * performed by the remote CPU after it exits idle.
310 * performed by the remote CPU prior to entering idle and therefore can
316 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
331 return false; // Non-zero, so report failure;
332 smp_rmb(); // Order *vp read and CT state re-read.
345 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
347 * The caller must have disabled interrupts and must not be idle.
355 /* It is illegal to call this from idle state. */
362 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
364 * If the current CPU is idle and running at a first-level (not nested)
365 * interrupt, or directly from idle, return true.
376 * the idle task, instead of an actual IPI.
392 * If we're not in an interrupt, we must be in the idle task!
396 /* Does CPU appear to be idle from an RCU standpoint? */
411 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
430 * quiescent-state help from rcu_note_context_switch().
438 * Make sure that we give the grace-period kthread time to detect any
439 * idle CPUs before taking active measures to force quiescent states.
457 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
467 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
479 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
514 * numbers mean idle. The value returned will thus be roughly double
532 * Send along grace-period-related data for rcutorture diagnostics.
544 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
561 * get re-enabled again.
567 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
570 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
583 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
601 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
612 * in a timely manner, the RCU grace-period kthread sets that CPU's
613 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
621 * interrupt or exception. In that case, the RCU grace-period kthread
637 if (!tick_nohz_full_cpu(rdp->cpu) ||
638 !READ_ONCE(rdp->rcu_urgent_qs) ||
639 READ_ONCE(rdp->rcu_forced_tick)) {
648 // handler and that the rcu_node lock is an irq-disabled lock
649 // prevents self-deadlock. So we can safely recheck under the lock.
651 raw_spin_lock_rcu_node(rdp->mynode);
652 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
655 WRITE_ONCE(rdp->rcu_forced_tick, true);
656 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
658 raw_spin_unlock_rcu_node(rdp->mynode);
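
The fragments above show a lockless fast-path test of ->rcu_urgent_qs and ->rcu_forced_tick followed by a recheck under the rcu_node lock before the tick dependency is set. A generic sketch of that check/lock/recheck idiom, with hypothetical names and none of the real function's interrupt-context handling:

    struct my_state {                                /* hypothetical */
            raw_spinlock_t lock;
            bool update_needed;
            bool update_done;
    };

    static void maybe_do_expensive_update(struct my_state *sp)
    {
            /* Lockless fast path: nothing to do, or already done. */
            if (!READ_ONCE(sp->update_needed) || READ_ONCE(sp->update_done))
                    return;

            raw_spin_lock(&sp->lock);
            /* Recheck: another context may have done the work meanwhile. */
            if (READ_ONCE(sp->update_needed) && !sp->update_done) {
                    do_expensive_update(sp);         /* hypothetical */
                    WRITE_ONCE(sp->update_done, true);
            }
            raw_spin_unlock(&sp->lock);
    }
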
664 * Check to see if any future non-offloaded RCU-related work will need
667 * it is -not- an exported member of the RCU API. This is used by
668 * the idle-entry code to figure out whether it is safe to disable the
669 * scheduler-clock interrupt.
671 * Just check whether or not this CPU has non-offloaded RCU callbacks
676 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
682 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
687 raw_lockdep_assert_held_rcu_node(rdp->mynode);
688 WRITE_ONCE(rdp->rcu_urgent_qs, false);
689 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
690 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
691 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
692 WRITE_ONCE(rdp->rcu_forced_tick, false);
697 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
700 * A @true return means that this CPU can safely enter RCU read-side
705 * current CPU is deep within its idle loop, in kernel entry/exit code,
743 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
744 * After all, the CPU might be in deep idle state, and thus executing no
750 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
751 rnp->gp_seq))
752 WRITE_ONCE(rdp->gpwrap, true);
753 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
754 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
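
A short sketch of why the ULONG_MAX / 4 offset above detects a badly lagging counter, assuming the usual wrap-aware comparison ULONG_CMP_LT(a, b) == (ULONG_MAX / 2 < (a) - (b)):

    /*
     * ULONG_CMP_LT(snap + ULONG_MAX / 4, cur) fires once cur has pulled
     * more than a quarter of the counter space ahead of snap (and less
     * than three quarters ahead, which the design assumes never happens
     * before ->gpwrap is acted on).  Using 16-bit arithmetic for brevity
     * ("ULONG_MAX" == 0xffff, quarter == 0x3fff, half == 0x7fff):
     *
     *   snap = 0xfff0, cur = 0x4000           (cur wrapped past zero)
     *   cur - snap            = 0x4010        more than a quarter ahead
     *   (snap + 0x3fff) - cur = 0xffef > 0x7fff, so the test fires
     */
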
760 * is in dynticks idle mode, which is an extended quiescent state.
765 * Full ordering between remote CPU's post idle accesses and updater's
772 * Ordering between remote CPU's pre idle accesses and post grace period
775 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
776 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
777 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
778 rcu_gpnum_ovf(rdp->mynode, rdp);
786 * by virtue of being in or having passed through a dynticks idle state since
798 struct rcu_node *rnp = rdp->mynode;
801 * If the CPU passed through or entered a dynticks idle phase with
805 * read-side critical section that started before the beginning
808 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
809 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
818 * the CPU-offline process, or, failing that, by rcu_gp_init()
820 * last task on a leaf rcu_node structure exiting its RCU read-side
825 * The rcu_node structure's ->lock is held here, which excludes
826 * the relevant portions the CPU-hotplug code, the grace-period
835 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
836 __func__, rnp->grplo, rnp->grphi, rnp->level,
837 (long)rnp->gp_seq, (long)rnp->completedqs);
838 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
839 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
840 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
842 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
843 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
844 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
850 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
853 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
860 if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
864 WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
866 smp_store_release(&rdp->rcu_urgent_qs, true);
868 WRITE_ONCE(rdp->rcu_urgent_qs, true);
872 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
874 * And some in-kernel loops check need_resched() before calling
876 * running in-kernel with scheduling-clock interrupts disabled.
879 if (tick_nohz_full_cpu(rdp->cpu) &&
880 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
882 WRITE_ONCE(rdp->rcu_urgent_qs, true);
883 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
884 ret = -1;
888 * If more than halfway to RCU CPU stall-warning time, invoke
896 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
897 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
898 ret = -1;
901 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
902 (rnp->ffmask & rdp->grpmask)) {
903 rdp->rcu_iw_pending = true;
904 rdp->rcu_iw_gp_seq = rnp->gp_seq;
905 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
908 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
909 int cpu = rdp->cpu;
915 rsrp = &rdp->snap_record;
916 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
917 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
918 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
919 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
920 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
921 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
922 rsrp->jiffies = jiffies;
923 rsrp->gp_seq = rdp->gp_seq;
930 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
934 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
935 gp_seq_req, rnp->level,
936 rnp->grplo, rnp->grphi, s);
940 * rcu_start_this_gp - Request the start of a particular grace period
947 * rcu_node structure's ->gp_seq_needed field. Returns true if there
948 * is reason to awaken the grace-period kthread.
950 * The caller must hold the specified rcu_node structure's ->lock, which
951 * is why the caller is responsible for waking the grace-period kthread.
964 * has already been recorded -- or if that grace period has in
966 * progress in a non-leaf node, no recording is needed because the
968 * Note that rnp_start->lock must not be released.
972 for (rnp = rnp_start; 1; rnp = rnp->parent) {
975 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
976 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
978 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
983 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
984 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
995 if (rnp != rnp_start && rnp->parent != NULL)
997 if (!rnp->parent)
1017 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1018 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1019 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
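
The rcu_start_this_gp() fragments above describe a funnel-locking walk from the leaf toward the root, recording the needed grace-period number at each level and stopping as soon as an earlier request already covers it. A simplified sketch of that walk, not the actual tree.c code (tracing, the in-progress-GP check, and the wakeup decision are omitted; the caller holds rnp_start's lock throughout):

    for (rnp = rnp_start; 1; rnp = rnp->parent) {
            if (rnp != rnp_start)
                    raw_spin_lock_rcu_node(rnp);
            if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
                rcu_seq_started(&rnp->gp_seq, gp_seq_req)) {
                    /* Already requested (or already started): stop here. */
                    if (rnp != rnp_start)
                            raw_spin_unlock_rcu_node(rnp);
                    break;
            }
            WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
            if (!rnp->parent) {
                    /* Reached the root: the GP kthread may need waking. */
                    if (rnp != rnp_start)
                            raw_spin_unlock_rcu_node(rnp);
                    break;
            }
            if (rnp != rnp_start)
                    raw_spin_unlock_rcu_node(rnp);
    }
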
1027 * Clean up any old requests for the just-ended grace period. Also return
1035 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1037 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1038 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1056 * is dangerous that late in the CPU-down hotplug process. The
1076 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1078 * sleep upon return, resulting in a grace-period hang), and don't bother
1079 * awakening when there is nothing for the grace-period kthread to do
1084 * So why do the self-wakeup when in an interrupt or softirq handler
1085 * in the grace-period kthread's context? Because the kthread might have
1087 * pre-sleep check of the awaken condition. In this case, a wakeup really
1103 * If there is room, assign a ->gp_seq number to any callbacks on this
1105 * that were previously assigned a ->gp_seq number that has since proven
1107 * ->gp_seq number while RCU is idle, but with reference to a non-root
1110 * the RCU grace-period kthread.
1112 * The caller must hold rnp->lock with interrupts disabled.
1123 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1126 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1129 * Callbacks are often registered with incomplete grace-period
1135 * accelerating callback invocation to an earlier grace-period
1139 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1143 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1148 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1155 * rcu_node structure's ->lock be held. It consults the cached value
1156 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1157 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1158 * while holding the leaf rcu_node structure's ->lock.
1168 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1170 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1183 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1185 * invoke it repeatedly. As long as it is not invoked -too- often...
1186 * Returns true if the RCU grace-period kthread needs to be awakened.
1188 * The caller must hold rnp->lock with interrupts disabled.
1196 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1200 * Find all callbacks whose ->gp_seq numbers indicate that they
1203 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
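
In context, the advance step above is normally paired with a re-acceleration of whatever callbacks remain, so that they are assigned to a future grace period. A hedged sketch of that pairing (the real rcu_advance_cbs() also bails out early when there are no pending callbacks):

    /* Move ready callbacks to RCU_DONE_TAIL, then re-classify the rest. */
    rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
    return rcu_accelerate_cbs(rnp, rdp);
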
1211 * that the RCU grace-period kthread be awakened.
1217 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1220 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1239 * Update CPU-local rcu_data state to record the beginnings and ends of
1240 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1242 * Returns true if the grace-period kthread needs to be awakened.
1252 if (rdp->gp_seq == rnp->gp_seq)
1256 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1257 unlikely(READ_ONCE(rdp->gpwrap))) {
1260 rdp->core_needs_qs = false;
1261 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1265 if (rdp->core_needs_qs)
1266 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1269 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1270 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1271 unlikely(READ_ONCE(rdp->gpwrap))) {
1277 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1278 need_qs = !!(rnp->qsmask & rdp->grpmask);
1279 rdp->cpu_no_qs.b.norm = need_qs;
1280 rdp->core_needs_qs = need_qs;
1283 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1284 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1285 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1286 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1287 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1288 WRITE_ONCE(rdp->gpwrap, false);
1300 rnp = rdp->mynode;
1301 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1302 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1316 /* Register a counter to suppress debugging grace-period delays. */
1341 static void rcu_gp_slow(int delay)
1343 if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1344 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1345 schedule_timeout_idle(delay);
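
A back-of-the-envelope check of the constant-slowdown property described earlier (lines 197-200 above), based on the modulo in rcu_gp_slow():

    /*
     * A delay of "delay" jiffies is injected once every
     * rcu_num_nodes * PER_RCU_NODE_PERIOD * delay grace periods, so the
     * average added cost per grace period is roughly
     *
     *     delay / (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)
     *   = 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD) jiffies,
     *
     * which is independent of the value of "delay" (for delay > 0).
     */
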
1350 /* Allow rcutorture to stall the grace-period kthread. */
1358 /* Actually implement the aforementioned wait. */
1369 pr_alert("%s: Wait complete\n", __func__);
1390 // If RCU was idle, note beginning of GP.
1407 // end of that GP. Either way, zero counter to avoid counter-wrap
1457 * wait tail: Tracks the set of nodes which need to
1458 * wait for the current GP to complete.
1464 * At every grace period init, a new wait node is added
1465 * to the llist. This wait node is used as wait tail
1467 * number of wait nodes, if all wait nodes are in use
1473 * in the llist should be used as a wait-tail for this
1474 * grace period, therefore users which should wait due
1478 * Below is an illustration of how the done and wait
1486 * +----------+ +--------+ +-------+
1488 * | head |---------> | cb2 |--------->| cb1 |
1490 * +----------+ +--------+ +-------+
1496 * WAIT TAIL
1500 * +----------+ +--------+ +--------+ +-------+
1502 * | head ------> wait |------> cb2 |------> | cb1 |
1504 * +----------+ +--------+ +--------+ +-------+
1516 * +----------+ +--------+ +--------+ +-------+
1518 * | head ------> wait |------> cb2 |------> | cb1 |
1520 * +----------+ +--------+ +--------+ +-------+
1526 * WAIT TAIL DONE TAIL
1530 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1532 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1534 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1545 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1547 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1549 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1556 * to use the rel-acq semantics. If the concurrent kworker
1571 * +----------+ +--------+
1573 * | head ------> wait |
1575 * +----------+ +--------+
1581 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
1592 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1))
1593 return &sr_wn->node;
1603 atomic_set_release(&sr_wn->inuse, 0);
1615 unsigned long oldstate = (unsigned long) rs->head.func;
1623 complete(&rs->completion);
1635 * follow acq-rel semantics.
1646 head = done->next;
1647 done->next = NULL;
1651 * done tail which is acq-read above is not removed
1690 llist_for_each_safe(rcu, next, wait_tail->next) {
1696 wait_tail->next = next;
1704 * wait head if there are no in-flight workers. If there are in-flight workers,
1705 * they will remove the last wait head.
1709 if (wait_tail->next && wait_tail->next->next == NULL &&
1710 rcu_sr_is_wait_head(wait_tail->next) &&
1712 rcu_sr_put_wait_head(wait_tail->next);
1713 wait_tail->next = NULL;
1722 * of outstanding users (if any are still left) and releasing wait-heads
1725 if (wait_tail->next) {
1752 /* Inject a wait-dummy-node. */
1757 * this step, since a GP-kthread, rcu_gp_init() -> gp_cleanup(),
1769 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
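
Pieced together from the fragments above (rcu_sr_normal_complete() and the llist_add() of rs->head), the waiter side of this mechanism looks roughly as follows. This is a hedged sketch, not the exact synchronize_rcu() slow path, and details such as the CONFIG_PROVE_RCU snapshot are omitted:

    struct rcu_synchronize rs;

    init_rcu_head_on_stack(&rs.head);
    init_completion(&rs.completion);

    /* Publish this waiter; the GP kthread picks it up at GP start. */
    llist_add((struct llist_node *)&rs.head, &rcu_state.srs_next);

    /* Make sure a grace period actually gets going. */
    (void)start_poll_synchronize_rcu();

    /* rcu_sr_normal_complete() eventually does complete(&rs->completion). */
    wait_for_completion(&rs.completion);
    destroy_rcu_head_on_stack(&rs.head);
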
1816 * separator to the llist, because there were no dummy-nodes left.
1818 * The number of dummy-nodes is fixed, so it could be that we have run out of
1826 * Apply per-leaf buffered online and offline operations to
1828 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1840 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1841 !rnp->wait_blkd_tasks) {
1849 /* Record old state, apply changes to ->qsmaskinit field. */
1850 oldmask = rnp->qsmaskinit;
1851 rnp->qsmaskinit = rnp->qsmaskinitnext;
1853 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1854 if (!oldmask != !rnp->qsmaskinit) {
1856 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1859 rnp->wait_blkd_tasks = true; /* blocked tasks */
1866 * If all waited-on tasks from prior grace period are
1869 * clear ->wait_blkd_tasks. Otherwise, if one of this
1871 * simply clear ->wait_blkd_tasks.
1873 if (rnp->wait_blkd_tasks &&
1874 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1875 rnp->wait_blkd_tasks = false;
1876 if (!rnp->qsmaskinit)
1887 * Set the quiescent-state-needed bits in all the rcu_node
1888 * structures for all currently online CPUs in breadth-first
1904 rnp->qsmask = rnp->qsmaskinit;
1905 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1906 if (rnp == rdp->mynode)
1909 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1910 rnp->level, rnp->grplo,
1911 rnp->grphi, rnp->qsmask);
1912 /* Quiescent states for tasks on any now-offline CPUs. */
1913 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1914 rnp->rcu_gp_init_mask = mask;
1915 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1916 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1931 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1942 // Someone like call_rcu() requested a force-quiescent-state scan.
1948 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1955 * Do one round of quiescent-state forcing.
1972 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1976 /* Collect dyntick-idle snapshots. */
1979 /* Handle dyntick-idle and offline CPUs. */
1982 /* Clear flag to prevent immediate re-entry. */
1991 * Loop doing repeated quiescent-state forcing until the grace period ends.
2032 * is required only for single-node rcu_node trees because readers blocking
2034 * For multi-node trees, checking the root node's ->qsmask suffices, because a
2035 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
2038 if (!READ_ONCE(rnp->qsmask) &&
2041 /* If time for quiescent-state forcing, do it. */
2056 ret = 0; /* Force full wait till next FQS. */
2070 j = rcu_state.jiffies_force_qs - j;
2093 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2109 * Propagate new ->gp_seq value to rcu_node structures so that
2110 * other CPUs don't have to wait until the start of the next grace
2112 * RCU grace-period initialization races by forcing the end of
2123 WARN_ON_ONCE(rnp->qsmask);
2124 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2125 if (!rnp->parent)
2128 if (rnp == rdp->mynode)
2130 /* smp_mb() provided by prior unlock-lock pair. */
2134 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2146 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2155 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2156 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2166 // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
2170 // hold the ->nocb_lock needed to safely access an offloaded
2171 // ->cblist.  We do not want to acquire that lock because
2181 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
2183 // ->gp_flags bits.
2205 /* Handle grace-period start. */
2225 /* Handle quiescent-state forcing. */
2228 /* Handle grace-period end. */
2237 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2238 * another grace period is required. Whether we wake the grace-period
2239 * kthread or it awakens itself for the next round of quiescent-state
2240 * forcing, that kthread will clean up after the just-completed grace
2241 * period. Note that the caller must hold rnp->lock, which is released
2245 __releases(rcu_get_root()->lock)
2260 * is the grace-period snapshot, which means that the quiescent states
2261 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2264 * As a special case, if mask is zero, the bit-already-cleared check is
2266 * during grace-period initialization.
2270 __releases(rnp->lock)
2279 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2291 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2292 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2293 mask, rnp->qsmask, rnp->level,
2294 rnp->grplo, rnp->grphi,
2295 !!rnp->gp_tasks);
2296 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2302 rnp->completedqs = rnp->gp_seq;
2303 mask = rnp->grpmask;
2304 if (rnp->parent == NULL) {
2312 rnp = rnp->parent;
2314 oldmask = READ_ONCE(rnp_c->qsmask);
2322 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2328 * RCU grace period. The caller must hold the corresponding rnp->lock with
2334 __releases(rnp->lock)
2343 rnp->qsmask != 0) {
2348 rnp->completedqs = rnp->gp_seq;
2349 rnp_p = rnp->parent;
2359 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2360 gps = rnp->gp_seq;
2361 mask = rnp->grpmask;
2378 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2379 rnp = rdp->mynode;
2381 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2382 rdp->gpwrap) {
2390 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2394 mask = rdp->grpmask;
2395 rdp->core_needs_qs = false;
2396 if ((rnp->qsmask & mask) == 0) {
2415 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2416 /* ^^^ Released rnp->lock */
2429 /* Check for grace-period ends and beginnings. */
2436 if (!rdp->core_needs_qs)
2441 * period? If no, then exit and wait for the next call.
2443 if (rdp->cpu_no_qs.b.norm)
2453 /* Return true if callback-invocation time limit exceeded. */
2467 * period. Throttle as specified by rdp->blimit.
2484 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2486 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2488 !rcu_segcblist_empty(&rdp->cblist),
2500 * completion (materialized by rnp->gp_seq update) thanks to the
2507 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2509 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2510 bl = max(rdp->blimit, pending >> div);
2511 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2522 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2523 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2525 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2527 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2543 f = rhp->func;
2545 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2570 // But rcuc kthreads can delay quiescent-state
2572 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2574 rdp->rcu_cpu_has_work = 1;
2581 rdp->n_cbs_invoked += count;
2586 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2587 rcu_segcblist_add_len(&rdp->cblist, -count);
2590 count = rcu_segcblist_n_cbs(&rdp->cblist);
2591 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2592 rdp->blimit = blimit;
2594 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2595 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2596 rdp->qlen_last_fqs_check = 0;
2597 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2598 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2599 rdp->qlen_last_fqs_check = count;
2605 empty = rcu_segcblist_empty(&rdp->cblist);
2609 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2610 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2618 * This function is invoked from each scheduling-clock interrupt,
2619 * and checks to see if this CPU is in a non-context-switch quiescent
2620 * state, for example, user mode or idle loop. It also schedules RCU
2634 trace_rcu_utilization(TPS("Start scheduler-tick"));
2637 /* The load-acquire pairs with the store-release setting to true. */
2639 /* Idle and userspace execution already are quiescent states. */
2653 trace_rcu_utilization(TPS("End scheduler-tick"));
2677 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2678 if (rnp->qsmask == 0) {
2683 * priority-boost blocked readers.
2686 /* rcu_initiate_boost() releases rnp->lock */
2692 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2699 mask |= rdp->grpmask;
2703 rsmask |= rdp->grpmask;
2706 /* Idle/offline CPUs, report (releases rnp->lock). */
2707 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2720 * CPUs are in dyntick-idle mode.
2733 for (; rnp != NULL; rnp = rnp->parent) {
2735 !raw_spin_trylock(&rnp->fqslock);
2737 raw_spin_unlock(&rnp_old->fqslock);
2746 raw_spin_unlock(&rnp_old->fqslock);
2770 struct rcu_node *rnp = rdp->mynode;
2775 WARN_ON_ONCE(!rdp->beenonline);
2790 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
2792 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2800 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2803 /* Re-invoke RCU core processing if there are callbacks remaining. */
2804 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2814 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2826 * is invoked from idle
2869 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2916 * Spawn per-CPU RCU core processing kthreads.
2933 rcu_segcblist_enqueue(&rdp->cblist, head);
2937 rcu_segcblist_n_cbs(&rdp->cblist));
2940 rcu_segcblist_n_cbs(&rdp->cblist));
2941 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2945 * Handle any core-RCU processing required by a call_rcu() invocation.
2953 * core in order to force a re-evaluation of RCU's idleness.
2969 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2970 rdp->qlen_last_fqs_check + qhimark)) {
2977 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2980 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2981 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2982 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2984 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2985 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2999 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3001 * structure's ->lock.
3008 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
3009 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
3011 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
3016 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3022 * grace periods. This omission is due to the need for no-CBs CPUs to
3023 * be holding ->nocb_lock to do this check, which is too heavy for a
3024 * common-case operation.
3028 struct rcu_node *const rnp = rdp->mynode;
3031 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3032 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3048 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3057 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
3060 WRITE_ONCE(head->func, rcu_leak_callback);
3063 head->func = func;
3064 head->next = NULL;
3074 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3080 if (rcu_segcblist_empty(&rdp->cblist))
3081 rcu_segcblist_init(&rdp->cblist);
3098 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
3099 * flush all lazy callbacks (including the new one) to the main ->cblist while
3106 * period elapses, in other words after all pre-existing RCU read-side
3111 * memory pressure and on systems which are lightly loaded or mostly idle.
3127 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3129 * ->cblist to prevent starting of grace periods too soon.
3136 * period elapses, in other words after all pre-existing RCU read-side
3138 * might well execute concurrently with RCU read-side critical sections
3141 * RCU read-side critical sections are delimited by rcu_read_lock()
3144 * or softirqs have been disabled also serve as RCU read-side critical
3149 * all pre-existing RCU read-side critical sections. On systems with more
3152 * last RCU read-side critical section whose beginning preceded the call
3153 * to call_rcu(). It also means that each CPU executing an RCU read-side
3156 * of that RCU read-side critical section. Note that these guarantees
3157 * include CPUs that are offline, idle, or executing in user mode, as
3163 * between the call to call_rcu() and the invocation of "func()" -- even
3167 * Implementation of these memory-ordering guarantees is described here:
3168 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
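
The guarantees documented above are easiest to see with the canonical call_rcu() pattern; a minimal, hypothetical example (struct foo, foo_reclaim(), and global_foo are illustrative, not part of tree.c):

    struct foo {
            int data;
            struct rcu_head rcu;
    };
    static struct foo __rcu *global_foo;             /* hypothetical pointer */

    static void foo_reclaim(struct rcu_head *rhp)
    {
            struct foo *fp = container_of(rhp, struct foo, rcu);

            kfree(fp);
    }

    static void foo_delete(struct foo *fp)
    {
            /* Unpublish, then defer the free past a grace period. */
            rcu_assign_pointer(global_foo, NULL);
            call_rcu(&fp->rcu, foo_reclaim);
    }
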
3177 * During early boot, any blocking grace-period wait automatically
3184 * grace-period optimization is ignored once the scheduler is running.
3224 /* Now we can wait. */
3233 * synchronize_rcu - wait until a grace period has elapsed.
3237 * read-side critical sections have completed. Note, however, that
3239 * concurrently with new RCU read-side critical sections that began while
3242 * RCU read-side critical sections are delimited by rcu_read_lock()
3245 * or softirqs have been disabled also serve as RCU read-side critical
3249 * Note that this guarantee implies further memory-ordering guarantees.
3252 * the end of its last RCU read-side critical section whose beginning
3254 * an RCU read-side critical section that extends beyond the return from
3257 * that RCU read-side critical section. Note that these guarantees include
3258 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3264 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3267 * Implementation of these memory-ordering guarantees is described here:
3268 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
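
For comparison with call_rcu(), a minimal blocking-updater sketch using synchronize_rcu(), reusing the hypothetical struct foo from the previous example (gp and gp_lock are likewise illustrative):

    static struct foo __rcu *gp;
    static DEFINE_SPINLOCK(gp_lock);

    static void foo_replace(struct foo *new_fp)
    {
            struct foo *old_fp;

            spin_lock(&gp_lock);
            old_fp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
            rcu_assign_pointer(gp, new_fp);
            spin_unlock(&gp_lock);

            synchronize_rcu();       /* wait for pre-existing readers */
            kfree(old_fp);
    }
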
3278 "Illegal synchronize_rcu() in RCU read-side critical section");
3292 // reuse of ->gp_seq_polled_snap.
3296 // Update the normal grace-period counters to record
3303 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3304 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3310 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3319 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3320 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3325 * get_state_synchronize_rcu - Snapshot current RCU state
3334 * Any prior manipulation of RCU-protected data must happen
3335 * before the load from ->gp_seq.
3343 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3344 * @rgosp: location to place combined normal/expedited grace-period state
3346 * Places the normal and expedited grace-period states in @rgosp. This
3363 * Any prior manipulation of RCU-protected data must happen
3364 * before the loads from ->gp_seq and ->expedited_sequence.
3367 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3368 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3385 rnp = rdp->mynode;
3392 // from which they are updated at grace-period start, as required.
3400 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3418 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3421 * Places the normal and expedited grace-period states in *@rgosp. This
3437 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3444 * can explicitly wait for a grace period, for example, by passing @oldstate
3451 * more than a billion grace periods (and way more on a 64-bit system!).
3453 * (many hours even on 32-bit systems) should check them occasionally and
3456 * to get a guaranteed-completed grace-period state.
3458 * In addition, because oldstate compresses the grace-period state for
3464 * This function provides the same memory-ordering guarantees that
3481 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3488 * can explicitly wait for a grace period, for example, by passing @rgosp
3493 * for more than a billion grace periods (and way more on a 64-bit
3495 * long time periods (many hours even on 32-bit systems) should check
3498 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3499 * grace-period state.
3501 * This function provides the same memory-ordering guarantees that would
3505 * ->gp_seq field be checked instead of that of the rcu_state structure.
3506 * The problem is that the just-ending grace-period's callbacks can be
3507 * invoked between the time that the root rcu_node structure's ->gp_seq
3508 * field is updated and the time that the rcu_state structure's ->gp_seq
3517 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3518 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3519 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3520 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3521 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3530 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3535 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3539 * more than 2 billion grace periods (and way more on a 64-bit system!),
3542 * This function provides the same memory-ordering guarantees that
3555 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3561 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3566 * more than 2 billion grace periods (and way more on a 64-bit system!),
3569 * This function provides the same memory-ordering guarantees that
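
A hedged example of the polled grace-period API that the kernel-doc fragments above describe (old_p is a hypothetical pointer whose readers must be done before it is freed):

    unsigned long cookie;

    /* Snapshot the state and make sure a grace period gets started. */
    cookie = start_poll_synchronize_rcu();

    /* ...do other useful work while the grace period proceeds... */

    if (poll_state_synchronize_rcu(cookie)) {
            kfree(old_p);                    /* grace period already elapsed */
    } else {
            cond_synchronize_rcu(cookie);    /* blocks only if still needed */
            kfree(old_p);
    }
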
3582 * Check to see if there is any immediate RCU-related work to be done by
3585 * CPU-local state are performed first. However, we must check for CPU
3592 struct rcu_node *rnp = rdp->mynode;
3603 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3613 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3618 rcu_segcblist_ready_cbs(&rdp->cblist))
3621 /* Has RCU gone idle with this CPU needing another grace period? */
3622 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3624 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3628 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3629 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3660 rhp->next = rhp; // Mark the callback as having been invoked.
3662 rcu_barrier_trace(TPS("LastCB"), -1, s);
3665 rcu_barrier_trace(TPS("CB"), -1, s);
3670 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3675 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3682 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3683 rdp->barrier_head.func = rcu_barrier_callback;
3684 debug_rcu_head_queue(&rdp->barrier_head);
3688 * queue. This way we don't wait for the bypass timer, which can reach seconds
3691 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3693 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3694 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3697 debug_rcu_head_unqueue(&rdp->barrier_head);
3698 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3703 smp_store_release(&rdp->barrier_seq_snap, gseq);
3707 * Called with preemption disabled, and from cross-cpu IRQ context.
3715 WARN_ON_ONCE(cpu != rdp->cpu);
3723 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3725 * Note that this primitive does not necessarily wait for an RCU grace period
3738 rcu_barrier_trace(TPS("Begin"), -1, s);
3745 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
3755 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3759 * to avoid a too-soon return to zero in case of an immediate
3760 * invocation of the just-enqueued callback (or preemption of
3761 * this task). Exclude CPU-hotplug operations to ensure that no
3762 * offline non-offloaded CPU has callbacks queued.
3776 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
3779 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
3780 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3787 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3797 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3808 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3812 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3818 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
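
A hedged example of the classic rcu_barrier() use case, a module exit path that must not free resources while call_rcu() callbacks referencing them are still queued (all names are hypothetical):

    static void __exit foo_exit(void)
    {
            /* Stop posting new callbacks (unregister hooks, etc.). */
            foo_unregister_everything();             /* hypothetical */

            /* Wait for all already-queued call_rcu() callbacks to run. */
            rcu_barrier();

            /* Only now is it safe to tear down what the callbacks used. */
            kmem_cache_destroy(foo_cachep);          /* hypothetical cache */
    }
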
3829 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3834 * rcu_barrier() system-wide from use of this function, which means that
3835 * callers might needlessly wait a second or three.
3877 return -EAGAIN;
3880 atomic_inc((atomic_t *)kp->arg);
3882 atomic_dec((atomic_t *)kp->arg);
3892 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
3904 * This will not be stable unless the rcu_node structure's ->lock is
3910 return READ_ONCE(rnp->qsmaskinitnext);
3916 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3920 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
3956 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
3979 * and all tasks that were preempted within an RCU read-side critical
3981 * read-side critical section. Some other CPU is reporting this fact with
3982 * the specified rcu_node structure's ->lock held and interrupts disabled.
3984 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
3985 * the leaf rcu_node structure's ->qsmaskinit field has already been
4001 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4005 mask = rnp->grpmask;
4006 rnp = rnp->parent;
4010 rnp->qsmaskinit &= ~mask;
4012 WARN_ON_ONCE(rnp->qsmask);
4013 if (rnp->qsmaskinit) {
4023 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4025 * must hold the corresponding leaf rcu_node ->lock with interrupts
4035 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4037 mask = rnp->grpmask;
4038 rnp = rnp->parent;
4042 oldmask = rnp->qsmaskinit;
4043 rnp->qsmaskinit |= mask;
4051 * Do boot-time initialization of a CPU's per-CPU RCU data.
4060 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4061 INIT_WORK(&rdp->strict_work, strict_work_handler);
4062 WARN_ON_ONCE(ct->nesting != 1);
4064 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4065 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4066 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4067 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4068 rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4069 rdp->last_sched_clock = jiffies;
4070 rdp->cpu = cpu;
4097 int rnp_index = rnp - rcu_get_root();
4099 if (rnp->exp_kworker)
4105 rnp->grplo, rnp->grphi);
4108 WRITE_ONCE(rnp->exp_kworker, kworker);
4111 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4113 rcu_thread_affine_rnp(kworker->task, rnp);
4114 wake_up_process(kworker->task);
4130 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4136 mutex_lock(&rnp->kthread_mutex);
4139 mutex_unlock(&rnp->kthread_mutex);
4144 * Invoked early in the CPU-online process, when pretty much all services
4147 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4149 * accept some slop in the rsp->gp_seq access due to the fact that this
4150 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4162 rdp->qlen_last_fqs_check = 0;
4163 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4164 rdp->blimit = blimit;
4165 ct->nesting = 1; /* CPU not up, no tearing. */
4169 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4170 * (re-)initialized.
4172 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4173 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4176 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4180 rnp = rdp->mynode;
4182 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4183 rdp->gp_seq_needed = rdp->gp_seq;
4184 rdp->cpu_no_qs.b.norm = true;
4185 rdp->core_needs_qs = false;
4186 rdp->rcu_iw_pending = false;
4187 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4188 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4189 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4206 return smp_load_acquire(&rdp->beenonline);
4210 * Near the end of the CPU-online process. Pretty much all services
4220 rnp = rdp->mynode;
4222 rnp->ffmask |= rdp->grpmask;
4228 // Stop-machine done, so allow nohz_full to disable tick.
4235 * (both expedited and normal) will wait on it. Note that this means that
4236 * incoming CPUs are not allowed to use RCU read-side critical sections
4256 if (rdp->cpu_started)
4258 rdp->cpu_started = true;
4260 rnp = rdp->mynode;
4261 mask = rdp->grpmask;
4266 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4268 newcpu = !(rnp->expmaskinitnext & mask);
4269 rnp->expmaskinitnext |= mask;
4273 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4274 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4275 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
4278 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4284 /* Report QS -after- changing ->qsmaskinitnext! */
4285 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4290 smp_store_release(&rdp->beenonline, true);
4291 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4296 * the rcu_node tree's ->qsmaskinitnext bit masks.
4309 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4313 * may introduce a new READ-side while it is actually off the QS masks.
4322 mask = rdp->grpmask;
4324 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4325 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4326 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
4327 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4328 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4330 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4333 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4336 rdp->cpu_started = false;
4341 * The outgoing CPU has just passed through the dying-idle state, and we
4357 if (rcu_segcblist_empty(&rdp->cblist)) {
4365 my_rnp = my_rdp->mynode;
4372 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4375 rcu_segcblist_disable(&rdp->cblist);
4376 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4389 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4390 !rcu_segcblist_empty(&rdp->cblist),
4392 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4393 rcu_segcblist_first_cb(&rdp->cblist));
4405 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4406 // Stop-machine done, so allow nohz_full to disable tick.
4419 struct rcu_node *rnp = rdp->mynode;
4421 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4422 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4423 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4438 rnp = rdp->mynode;
4440 rnp->ffmask &= ~rdp->grpmask;
4443 // nohz_full CPUs need the tick for stop-machine to work quickly
4450 * On non-huge systems, use expedited RCU grace periods to make suspend
4486 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4500 /* This is a pre-SMP initcall; we expect a single CPU. */
4503 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
4507 rcu_spawn_rnp_kthreads(rdp->mynode);
4517 * initialization process. Before this is called, the idle task might
4518 * contain synchronous grace-period primitives (during which time, this idle
4519 * task is booting the system, and such primitives are no-ops). After this
4520 * function is called, any synchronous grace-period primitives are run as
4534 // Fix up the ->gp_seq counters.
4537 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4567 /* Initialize the level-tracking arrays. */
4571 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4576 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4583 raw_spin_lock_init(&rnp->fqslock);
4584 lockdep_set_class_and_name(&rnp->fqslock,
4586 rnp->gp_seq = rcu_state.gp_seq;
4587 rnp->gp_seq_needed = rcu_state.gp_seq;
4588 rnp->completedqs = rcu_state.gp_seq;
4589 rnp->qsmask = 0;
4590 rnp->qsmaskinit = 0;
4591 rnp->grplo = j * cpustride;
4592 rnp->grphi = (j + 1) * cpustride - 1;
4593 if (rnp->grphi >= nr_cpu_ids)
4594 rnp->grphi = nr_cpu_ids - 1;
4596 rnp->grpnum = 0;
4597 rnp->grpmask = 0;
4598 rnp->parent = NULL;
4600 rnp->grpnum = j % levelspread[i - 1];
4601 rnp->grpmask = BIT(rnp->grpnum);
4602 rnp->parent = rcu_state.level[i - 1] +
4603 j / levelspread[i - 1];
4605 rnp->level = i;
4606 INIT_LIST_HEAD(&rnp->blkd_tasks);
4608 init_waitqueue_head(&rnp->exp_wq[0]);
4609 init_waitqueue_head(&rnp->exp_wq[1]);
4610 init_waitqueue_head(&rnp->exp_wq[2]);
4611 init_waitqueue_head(&rnp->exp_wq[3]);
4612 spin_lock_init(&rnp->exp_lock);
4613 mutex_init(&rnp->kthread_mutex);
4614 raw_spin_lock_init(&rnp->exp_poll_lock);
4615 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4616 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4624 while (i > rnp->grphi)
4626 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4627 per_cpu_ptr(&rcu_data, i)->barrier_head.next =
4628 &per_cpu_ptr(&rcu_data, i)->barrier_head;
4634 * Force priority from the kernel command-line into range.
4658 * the ->node array in the rcu_state structure.
4694 /* If the compile-time values are accurate, just leave. */
4702 * The boot-time rcu_fanout_leaf parameter must be at least two
4704 * Complain and fall back to the compile-time values if this
4719 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4723 * If this limit is exceeded, fall back to the compile-time values.
4725 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4738 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4760 if (rnp->level != level) {
4763 level = rnp->level;
4765 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4788 * We don't need protection against CPU-hotplug here because
4806 /* -After- the rcu_node ->lock fields are initialized! */
4812 // Kick-start in case any polled grace periods started early.