Lines Matching +full:wait +full:state
1 /* SPDX-License-Identifier: GPL-2.0+ */
27 * Return the value that the expedited-grace-period counter will have
46 * Take a snapshot of the expedited-grace-period counter, which is the
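The two comment fragments above describe the sequence-counter handshake that lets a later caller detect that an already-completed expedited grace period covers it. Below is a minimal sketch of that pattern under simplifying assumptions: a single state bit and the invented names exp_gp_seq, exp_seq_snap() and exp_seq_done(); the kernel's real helpers are rcu_seq_snap()/rcu_seq_done() with a wider state field and are not reproduced here.

#include <linux/compiler.h>
#include <linux/types.h>

static unsigned long exp_gp_seq;	/* bit 0: GP in progress, upper bits: count */

/* Value the counter will have once a full new grace period has ended. */
static unsigned long exp_seq_snap(void)
{
	return (READ_ONCE(exp_gp_seq) + 3) & ~0x1UL;
}

/* Has the counter reached the snapshot?  Wrap-safe comparison. */
static bool exp_seq_done(unsigned long s)
{
	return (long)(READ_ONCE(exp_gp_seq) - s) >= 0;
}

A caller takes the snapshot before touching shared state, and any time the counter later reaches that snapshot it knows a full expedited grace period elapsed after the snapshot was taken.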
71 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
72 * recent CPU-online activity. Note that these masks are not cleared
75 * no-work-to-do fastpath.
98 if (rnp->expmaskinit == rnp->expmaskinitnext) {
104 oldmask = rnp->expmaskinit;
105 rnp->expmaskinit = rnp->expmaskinitnext;
113 mask = rnp->grpmask;
114 rnp_up = rnp->parent;
118 if (rnp_up->expmaskinit)
120 rnp_up->expmaskinit |= mask;
124 mask = rnp_up->grpmask;
125 rnp_up = rnp_up->parent;
131 * Reset the ->expmask values in the rcu_node tree in preparation for
142 WARN_ON_ONCE(rnp->expmask);
143 WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
149 * Return non-zero if there is no RCU expedited grace period in progress
157 return READ_ONCE(rnp->exp_tasks) == NULL &&
158 READ_ONCE(rnp->expmask) == 0;
163 * rcu_node's ->lock.
178 * Report the exit from RCU read-side critical section for the last task
179 * that queued itself during or before the current expedited preemptible-RCU
187 __releases(rnp->lock)
194 if (!rnp->expmask)
200 if (rnp->parent == NULL) {
207 mask = rnp->grpmask;
209 rnp = rnp->parent;
211 WARN_ON_ONCE(!(rnp->expmask & mask));
212 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
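The __rcu_report_exp_rnp() fragments above clear a bit in rnp->expmask and, once a node has no bits left, propagate the clearing toward the root. A simplified sketch of that walk follows; locking, blocked-task checks and wakeups are omitted, and the exp_node type with its fields is invented for illustration.

struct exp_node {
	struct exp_node *parent;
	unsigned long expmask;	/* CPUs/children the expedited GP still waits for */
	unsigned long grpmask;	/* this node's bit in its parent's expmask */
};

static void report_exp_qs_sketch(struct exp_node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->expmask &= ~mask;
		if (rnp->expmask)
			break;		/* this subtree still has work outstanding */
		if (!rnp->parent)
			break;		/* root is clear: the expedited GP is done */
		mask = rnp->grpmask;	/* clear our bit at the next level up */
		rnp = rnp->parent;
	}
}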
217 * Report expedited quiescent state for specified node. This is a
218 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
229 * Report expedited quiescent state for multiple CPUs, all covered by the
234 __releases(rnp->lock)
241 if (!(rnp->expmask & mask_in)) {
245 mask = mask_in & rnp->expmask;
246 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
249 if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
251 rdp->rcu_forced_tick_exp = false;
254 __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
258 * Report expedited quiescent state for specified rcu_data (CPU).
263 struct rcu_node *rnp = rdp->mynode;
266 WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
267 ASSERT_EXCLUSIVE_WRITER(rdp->cpu_no_qs.b.exp);
268 rcu_report_exp_cpu_mult(rnp, flags, rdp->grpmask, true);
271 /* Common code for work-done checking. */
288 * Funnel-lock acquisition for expedited grace periods. Returns true
290 * can piggy-back on, and with no mutex held. Otherwise, returns false
297 struct rcu_node *rnp = rdp->mynode;
300 /* Low-contention fastpath. */
301 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
303 ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
310 * otherwise falls through to acquire ->exp_mutex. The mapping
314 for (; rnp != NULL; rnp = rnp->parent) {
318 /* Work not done, either wait here or go up. */
319 spin_lock(&rnp->exp_lock);
320 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
322 /* Someone else doing GP, so wait for them. */
323 spin_unlock(&rnp->exp_lock);
324 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
325 rnp->grplo, rnp->grphi,
326 TPS("wait"));
327 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
331 WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
332 spin_unlock(&rnp->exp_lock);
333 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
334 rnp->grplo, rnp->grphi, TPS("nxtlvl"));
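The exp_funnel_lock() fragments above implement funnel locking: each rcu_node level records the most-future expedited grace period requested through it, so at most one task per grace period climbs to the root mutex while everyone else waits to piggy-back. A rough sketch of that control flow follows, assuming an invented funnel_node type, the exp_seq_done() helper from the snapshot sketch above, and a single wait queue per node (the kernel indexes one of four per node by grace-period number).

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct funnel_node {
	struct funnel_node *parent;
	spinlock_t exp_lock;
	unsigned long exp_seq_rq;	/* most-future GP requested via this node */
	wait_queue_head_t exp_wq;
};

static DEFINE_MUTEX(exp_gp_mutex);	/* held by whoever drives the expedited GP */

/* Returns true if the caller may piggy-back; false if it must drive the GP. */
static bool exp_funnel_lock_sketch(struct funnel_node *rnp, unsigned long s)
{
	for (; rnp; rnp = rnp->parent) {
		if (exp_seq_done(s))			/* GP already done: piggy-back */
			return true;
		spin_lock(&rnp->exp_lock);
		if ((long)(rnp->exp_seq_rq - s) >= 0) {	/* someone is ahead of us */
			spin_unlock(&rnp->exp_lock);
			wait_event(rnp->exp_wq, exp_seq_done(s));
			return true;
		}
		rnp->exp_seq_rq = s;			/* followers can now wait on us */
		spin_unlock(&rnp->exp_lock);
	}
	mutex_lock(&exp_gp_mutex);			/* we will run the expedited GP */
	return false;
}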
349 * expedited grace period needs to wait for.
364 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
366 unsigned long mask = rdp->grpmask;
370 !(rnp->qsmaskinitnext & mask)) {
391 rdp->exp_watching_snap = snap;
394 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
397 * Need to wait for any blocked tasks as well. Note that
399 * until such time as the ->expmask bits are cleared.
402 WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
405 /* IPI the remaining CPUs for expedited quiescent state. */
408 unsigned long mask = rdp->grpmask;
411 if (rcu_watching_snap_stopped_since(rdp, rdp->exp_watching_snap)) {
428 if ((rnp->qsmaskinitnext & mask) &&
429 (rnp->expmask & mask)) {
437 if (rnp->expmask & mask)
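The fragments above select which CPUs actually need an IPI: a per-CPU snapshot is taken first, and a CPU that was idle (not "watching" RCU) at snapshot time, or that has passed through idle since, already implies a quiescent state and can be skipped. The toy sketch below illustrates that even/odd-counter idea; the cpu_watch type and helpers are invented and far simpler than the kernel's rcu_watching tracking (in particular, memory ordering is ignored).

#include <linux/atomic.h>
#include <linux/types.h>

struct cpu_watch {
	atomic_t ctr;	/* odd while the CPU is watching RCU, even while idle */
};

/* Snapshot now; decide later whether an IPI is still required. */
static int cpu_watch_snap(struct cpu_watch *w)
{
	return atomic_read(&w->ctr);
}

/* True if the CPU was idle at snapshot time or has idled at least once since. */
static bool cpu_qs_since_snap(struct cpu_watch *w, int snap)
{
	return !(snap & 1) || atomic_read(&w->ctr) != snap;
}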
465 return !!READ_ONCE(rnp->exp_kworker);
470 kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
476 kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
481 kthread_flush_work(&rnp->rew.rew_work);
485 * Work-queue handler to drive an expedited grace period forward.
492 rcu_exp_sel_wait_wake(rewp->rew_s);
497 kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
498 kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
503 * to wait for.
515 rnp->exp_need_flush = false;
516 if (!READ_ONCE(rnp->expmask))
517 continue; /* Avoid early boot non-existent wq. */
522 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
526 rnp->exp_need_flush = true;
529 /* Wait for jobs (if any) to complete. */
531 if (rnp->exp_need_flush)
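The kthread_* fragments above push per-leaf-node CPU selection onto dedicated workers and then flush only the nodes that had work queued. A condensed sketch of that dispatch-then-flush pattern using the stock kthread_worker API follows; the node_work structure, the worker argument and select_cpus_fn() are placeholders, and the kernel additionally skips nodes with nothing to wait for or runs the handler inline during early boot.

#include <linux/kthread.h>

struct node_work {
	struct kthread_work work;
	bool need_flush;
};

/* Placeholder for the real per-node CPU-selection handler. */
static void select_cpus_fn(struct kthread_work *w)
{
}

static void dispatch_and_wait(struct kthread_worker *kw,
			      struct node_work *nodes, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		kthread_init_work(&nodes[i].work, select_cpus_fn);
		kthread_queue_work(kw, &nodes[i].work);
		nodes[i].need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	for (i = 0; i < n; i++)
		if (nodes[i].need_flush)
			kthread_flush_work(&nodes[i].work);
}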
536 * Wait for the expedited grace period to elapse, within time limit.
567 pr_err("INFO: %s detected expedited stalls, but suppressed full report due to a stuck CSD-lock.\n", rcu_state.name);
578 if (!(READ_ONCE(rnp->expmask) & mask))
582 pr_cont(" %d-%c%c%c%c", cpu,
584 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
585 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
586 "D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
590 j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask),
591 ".T"[!!data_race(rnp_root->exp_tasks)]);
599 pr_cont(" l=%u:%d-%d:%#lx/%c",
600 rnp->level, rnp->grplo, rnp->grphi, data_race(rnp->expmask),
601 ".T"[!!data_race(rnp->exp_tasks)]);
608 if (!(READ_ONCE(rnp->expmask) & mask))
617 * Wait for the expedited grace period to elapse, issuing any needed
639 mask = READ_ONCE(rnp->expmask);
642 if (rdp->rcu_forced_tick_exp)
644 rdp->rcu_forced_tick_exp = true;
666 rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
678 * Wait for the current expedited grace period to complete, and then
679 * wake up everyone who piggybacked on the just-completed expedited
680 * grace period. Also update all the ->exp_seq_rq counters as needed
681 * in order to avoid counter-wrap problems.
697 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
698 spin_lock(&rnp->exp_lock);
700 if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
701 WRITE_ONCE(rnp->exp_seq_rq, s);
702 spin_unlock(&rnp->exp_lock);
705 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
713 * workqueues and mid-boot-time tasks.
717 /* Initialize the rcu_node tree in preparation for the wait. */
720 /* Wait and clean up, including waking everyone. */
724 /* Request an expedited quiescent state. */
740 * RCU read-side critical section in effect, request that the
741 * next rcu_read_unlock() record the quiescent state up the
742 * ->expmask fields in the rcu_node tree. Otherwise, immediately
743 * report the quiescent state.
750 struct rcu_node *rnp = rdp->mynode;
754 * First, is there no need for a quiescent state from this CPU,
755 * or is this CPU already looking for a quiescent state for the
758 * sync_sched_exp_online_cleanup() implementation being a no-op,
761 ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
762 if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
763 READ_ONCE(rdp->cpu_no_qs.b.exp)))
767 * Second, the common case of not being in an RCU read-side
769 * report the quiescent state, otherwise defer.
781 * Third, the less-common case of being in an RCU read-side
786 * grace period is still waiting on this CPU, set ->deferred_qs
787 * so that the eventual quiescent state will be reported.
789 * can have caused this quiescent state to already have been
790 * reported, so we really do need to check ->expmask.
794 if (rnp->expmask & rdp->grpmask) {
795 WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
796 t->rcu_read_unlock_special.b.exp_hint = true;
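The comment fragments above lay out three cases for the preemptible-RCU IPI handler: nothing to do, report the quiescent state immediately, or defer it so the outermost rcu_read_unlock() reports it later. The skeleton below only restates that decision order; every helper in it is a hypothetical stand-in, not a kernel function, and the real handler also deals with interrupts, hotplug and KCSAN assertions.

/* All of these are hypothetical stand-ins for the real checks on
 * rnp->expmask, rdp->cpu_no_qs.b.exp and the task's rcu_read_lock nesting. */
static bool exp_qs_needed_here(void);
static bool exp_qs_already_requested(void);
static bool in_rcu_read_side_section(void);
static void report_exp_qs_now(void);
static void mark_deferred_exp_qs(void);

static void exp_ipi_handler_sketch(void)
{
	/* Case one: no QS needed here, or one is already on its way. */
	if (!exp_qs_needed_here() || exp_qs_already_requested())
		return;

	/* Case two: not in a read-side critical section, so report now. */
	if (!in_rcu_read_side_section()) {
		report_exp_qs_now();
		return;
	}

	/* Case three: defer; the outermost rcu_read_unlock() will report it. */
	mark_deferred_exp_qs();
}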
812 * Scan the current list of tasks blocked within RCU read-side critical
823 if (!rnp->exp_tasks) {
827 t = list_entry(rnp->exp_tasks->prev,
829 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
830 pr_cont(" P%d", t->pid);
838 * Scan the current list of tasks blocked within RCU read-side critical
850 if (!READ_ONCE(rnp->exp_tasks)) {
854 t = list_entry(rnp->exp_tasks->prev,
856 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
869 /* Invoked on each online non-idle CPU for expedited quiescent state. */
873 struct rcu_node *rnp = rdp->mynode;
876 ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
877 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
888 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
898 rnp = rdp->mynode;
900 /* Quiescent state either not needed or already requested, leave. */
901 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
902 READ_ONCE(rdp->cpu_no_qs.b.exp)) {
906 /* Quiescent state needed on current CPU, so set it up locally. */
914 /* Quiescent state needed on some other CPU, send IPI. */
922 * tasks blocked within RCU read-side critical sections that are
932 * tasks blocked within RCU read-side critical sections that are blocking
942 * synchronize_rcu_expedited - Brute-force RCU grace period
944 * Wait for an RCU grace period, but expedite it. The basic idea is to
945 * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
947 * causes the outermost rcu_read_unlock() to report the quiescent state
948 * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
949 * other hand, if the CPU is not in an RCU read-side critical section,
950 * the IPI handler reports the quiescent state immediately.
953 * implementations, it is still unfriendly to real-time workloads, so is
954 * thus not recommended for any sort of common-case code. In fact, if
971 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
973 /* Is the state such that the call is a grace period? */
979 // them, which allows reuse of ->gp_seq_polled_exp_snap.
1011 /* Wait for expedited grace period to complete. */
1013 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
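The kernel-doc fragments above describe synchronize_rcu_expedited() from the caller's point of view: it forces a fast grace period by IPIing non-idle CPUs, at some cost to real-time workloads. A minimal caller-side sketch follows; struct foo, foo_list and foo_lock are made up for illustration, but the RCU and list APIs are the stock kernel ones.

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	int data;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

static void foo_remove(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);		/* readers may still hold references */
	spin_unlock(&foo_lock);

	synchronize_rcu_expedited();	/* all pre-existing readers have finished */
	kfree(p);
}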
1032 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1033 s = rnp->exp_seq_poll_rq;
1034 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
1035 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1044 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1045 s = rnp->exp_seq_poll_rq;
1047 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
1048 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1052 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
1069 rnp = rdp->mynode;
1071 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1074 rnp->exp_seq_poll_rq = s;
1075 queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
1079 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1086 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
1087 * @rgosp: Place to put snapshot of grace-period state
1089 * Places the normal and expedited grace-period states in rgosp. This
1090 * state value can be passed to a later call to cond_synchronize_rcu_full()
1104 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
1111 * invoke synchronize_rcu_expedited() to wait for a full grace period.
1115 * more than 2 billion grace periods (and way more on a 64-bit system!),
1118 * This function provides the same memory-ordering guarantees that
1131 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
1138 * to wait for a full grace period.
1142 * more than 2 billion grace periods (and way more on a 64-bit system!),
1145 * This function provides the same memory-ordering guarantees that