Lines Matching +full:b +full:- +full:side

1 /* SPDX-License-Identifier: GPL-2.0+ */
27 * Return the value that the expedited-grace-period counter will have
46 * Take a snapshot of the expedited-grace-period counter, which is the
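The two partial comments above belong to rcu_exp_gp_seq_endval() and rcu_exp_gp_seq_snap(), which compute, respectively, the value the expedited-grace-period counter will have at the end of the current grace period and the value a new waiter must wait for. As a hedged illustration only (a simplified userspace model of the rcu_seq_*() scheme; the helper names below are invented), the counter keeps "grace period in progress" state in its low bits and a completion count above them:

#include <stdio.h>

#define SEQ_STATE_MASK 0x3UL	/* low bits: grace period in progress */

static void seq_start(unsigned long *seq)
{
	*seq += 1;			/* mark a grace period as started */
}

static void seq_end(unsigned long *seq)
{
	/* Round up to the next multiple of four: grace period complete. */
	*seq = (*seq + SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

/* Value the counter must reach before a waiter arriving now is covered,
 * even if a grace period is already in progress. */
static unsigned long seq_snap(unsigned long seq)
{
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

/* Wrap-safe "has the counter reached snapshot s yet?" test. */
static int seq_done(unsigned long seq, unsigned long s)
{
	return (long)(seq - s) >= 0;
}

int main(void)
{
	unsigned long seq = 0;
	unsigned long s = seq_snap(seq);	/* waiter takes its snapshot */

	seq_start(&seq);
	seq_end(&seq);				/* one full grace period elapses */
	printf("snapshot %#lx done: %d\n", s, seq_done(seq, s));
	return 0;
}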
71 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
72 * recent CPU-online activity. Note that these masks are not cleared
75 * no-work-to-do fastpath.
98 if (rnp->expmaskinit == rnp->expmaskinitnext) { in sync_exp_reset_tree_hotplug()
104 oldmask = rnp->expmaskinit; in sync_exp_reset_tree_hotplug()
105 rnp->expmaskinit = rnp->expmaskinitnext; in sync_exp_reset_tree_hotplug()
113 mask = rnp->grpmask; in sync_exp_reset_tree_hotplug()
114 rnp_up = rnp->parent; in sync_exp_reset_tree_hotplug()
118 if (rnp_up->expmaskinit) in sync_exp_reset_tree_hotplug()
120 rnp_up->expmaskinit |= mask; in sync_exp_reset_tree_hotplug()
124 mask = rnp_up->grpmask; in sync_exp_reset_tree_hotplug()
125 rnp_up = rnp_up->parent; in sync_exp_reset_tree_hotplug()
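The hotplug-reset lines above copy each leaf's ->expmaskinitnext into ->expmaskinit and, when a leaf goes from having no online CPUs to having some, walk ->parent pointers setting the subtree's ->grpmask bit at each level, stopping once they reach an ancestor that had already been told about some child. A hedged sketch of just that upward walk, over an invented node structure rather than struct rcu_node:

/* Illustrative combining-tree node; fields mirror the idea, not the kernel. */
struct tree_node {
	struct tree_node *parent;
	unsigned long grpmask;	/* this node's bit in its parent's initmask */
	unsigned long initmask;	/* which children contain online CPUs */
};

static void propagate_new_leaf(struct tree_node *leaf)
{
	unsigned long mask = leaf->grpmask;
	struct tree_node *up = leaf->parent;

	while (up) {
		int done = (up->initmask != 0);	/* ancestors already told? */

		up->initmask |= mask;
		if (done)
			break;
		mask = up->grpmask;	/* set our bit one level higher next */
		up = up->parent;
	}
}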
131 * Reset the ->expmask values in the rcu_node tree in preparation for
142 WARN_ON_ONCE(rnp->expmask); in sync_exp_reset_tree()
143 WRITE_ONCE(rnp->expmask, rnp->expmaskinit); in sync_exp_reset_tree()
147 * until such time as the ->expmask bits are cleared. in sync_exp_reset_tree()
150 WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next); in sync_exp_reset_tree()
156 * Return non-zero if there is no RCU expedited grace period in progress
164 return READ_ONCE(rnp->exp_tasks) == NULL && in sync_rcu_exp_done()
165 READ_ONCE(rnp->expmask) == 0; in sync_rcu_exp_done()
170 * rcu_node's ->lock.
185 * Report the exit from RCU read-side critical section for the last task
186 * that queued itself during or before the current expedited preemptible-RCU
194 __releases(rnp->lock) in __rcu_report_exp_rnp()
201 if (!rnp->expmask) in __rcu_report_exp_rnp()
207 if (rnp->parent == NULL) { in __rcu_report_exp_rnp()
214 mask = rnp->grpmask; in __rcu_report_exp_rnp()
216 rnp = rnp->parent; in __rcu_report_exp_rnp()
218 WARN_ON_ONCE(!(rnp->expmask & mask)); in __rcu_report_exp_rnp()
219 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); in __rcu_report_exp_rnp()
225 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
241 __releases(rnp->lock) in rcu_report_exp_cpu_mult()
248 if (!(rnp->expmask & mask_in)) { in rcu_report_exp_cpu_mult()
252 mask = mask_in & rnp->expmask; in rcu_report_exp_cpu_mult()
253 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); in rcu_report_exp_cpu_mult()
256 if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp) in rcu_report_exp_cpu_mult()
258 rdp->rcu_forced_tick_exp = false; in rcu_report_exp_cpu_mult()
261 __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */ in rcu_report_exp_cpu_mult()
270 struct rcu_node *rnp = rdp->mynode; in rcu_report_exp_rdp()
273 WRITE_ONCE(rdp->cpu_no_qs.b.exp, false); in rcu_report_exp_rdp()
274 ASSERT_EXCLUSIVE_WRITER(rdp->cpu_no_qs.b.exp); in rcu_report_exp_rdp()
275 rcu_report_exp_cpu_mult(rnp, flags, rdp->grpmask, true); in rcu_report_exp_rdp()
278 /* Common code for work-done checking. */
285 * completion with post GP update side accesses. Pairs with in sync_exp_work_done()
295 * Funnel-lock acquisition for expedited grace periods. Returns true
297 * can piggy-back on, and with no mutex held. Otherwise, returns false
304 struct rcu_node *rnp = rdp->mynode; in exp_funnel_lock()
307 /* Low-contention fastpath. */ in exp_funnel_lock()
308 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) && in exp_funnel_lock()
310 ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) && in exp_funnel_lock()
317 * otherwise falls through to acquire ->exp_mutex. The mapping in exp_funnel_lock()
321 for (; rnp != NULL; rnp = rnp->parent) { in exp_funnel_lock()
326 spin_lock(&rnp->exp_lock); in exp_funnel_lock()
327 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) { in exp_funnel_lock()
330 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
331 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, in exp_funnel_lock()
332 rnp->grplo, rnp->grphi, in exp_funnel_lock()
334 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in exp_funnel_lock()
338 WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */ in exp_funnel_lock()
339 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
340 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, in exp_funnel_lock()
341 rnp->grplo, rnp->grphi, TPS("nxtlvl")); in exp_funnel_lock()
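The exp_funnel_lock() lines above implement funnel locking: a task wanting an expedited grace period with snapshot s climbs the rcu_node tree and, at each level under ->exp_lock, checks ->exp_seq_rq. If an earlier task has already requested a grace period at least as new as s, the newcomer parks on that node's exp_wq bucket and piggy-backs instead of contending for the root ->exp_mutex. A hedged model of just the per-node decision (the struct and function names are invented, and the real code holds rnp->exp_lock around this check):

#include <stdbool.h>

/* Illustrative stand-in for the per-node bookkeeping; not struct rcu_node. */
struct funnel_node {
	struct funnel_node *parent;
	unsigned long exp_seq_rq;	/* newest grace-period snapshot requested here */
};

/*
 * Returns true if the caller can sleep on this node's wait queue and reuse an
 * already-requested grace period; otherwise it records its own snapshot so
 * later arrivals can piggy-back on it, and the caller climbs to ->parent.
 */
static bool funnel_try_piggyback(struct funnel_node *n, unsigned long s)
{
	if ((long)(n->exp_seq_rq - s) >= 0)
		return true;
	n->exp_seq_rq = s;		/* "Followers can wait on us." */
	return false;
}

In the listing, the piggy-back path is the wait_event() on rnp->exp_wq[rcu_seq_ctr(s) & 0x3]; only a task that reaches the root without finding an equal-or-newer request goes on to drive the grace period itself.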
371 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { in __sync_rcu_exp_select_node_cpus()
373 unsigned long mask = rdp->grpmask; in __sync_rcu_exp_select_node_cpus()
377 !(rnp->qsmaskinitnext & mask)) { in __sync_rcu_exp_select_node_cpus()
398 rdp->exp_watching_snap = snap; in __sync_rcu_exp_select_node_cpus()
401 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test; in __sync_rcu_exp_select_node_cpus()
408 unsigned long mask = rdp->grpmask; in __sync_rcu_exp_select_node_cpus()
411 if (rcu_watching_snap_stopped_since(rdp, rdp->exp_watching_snap)) { in __sync_rcu_exp_select_node_cpus()
428 if ((rnp->qsmaskinitnext & mask) && in __sync_rcu_exp_select_node_cpus()
429 (rnp->expmask & mask)) { in __sync_rcu_exp_select_node_cpus()
437 if (rnp->expmask & mask) in __sync_rcu_exp_select_node_cpus()
465 return !!READ_ONCE(rnp->exp_kworker); in rcu_exp_par_worker_started()
470 kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); in sync_rcu_exp_select_cpus_queue_work()
476 kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work); in sync_rcu_exp_select_cpus_queue_work()
481 kthread_flush_work(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus_flush_work()
485 * Work-queue handler to drive an expedited grace period forward.
492 rcu_exp_sel_wait_wake(rewp->rew_s); in wait_rcu_exp_gp()
497 kthread_init_work(&rew->rew_work, wait_rcu_exp_gp); in synchronize_rcu_expedited_queue_work()
498 kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work); in synchronize_rcu_expedited_queue_work()
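The expedited machinery queues its per-node CPU selection and its top-level grace-period driver onto kthread_worker instances (rnp->exp_kworker and rcu_exp_gp_kworker) rather than running them in the caller's context. A hedged, generic sketch of that kthread_worker pattern, with the worker name and work function invented for illustration:

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;

/* Runs in the worker kthread's process context. */
static void demo_work_fn(struct kthread_work *work)
{
	/* ... deferred processing would go here ... */
}

static int demo_setup(void)
{
	demo_worker = kthread_create_worker(0, "demo_exp_worker");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);

	kthread_init_work(&demo_work, demo_work_fn);
	kthread_queue_work(demo_worker, &demo_work);
	kthread_flush_work(&demo_work);	/* wait for completion, as the flush above does */
	return 0;
}

static void demo_teardown(void)
{
	kthread_destroy_worker(demo_worker);
}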
515 rnp->exp_need_flush = false; in sync_rcu_exp_select_cpus()
516 if (!READ_ONCE(rnp->expmask)) in sync_rcu_exp_select_cpus()
517 continue; /* Avoid early boot non-existent wq. */ in sync_rcu_exp_select_cpus()
522 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
526 rnp->exp_need_flush = true; in sync_rcu_exp_select_cpus()
531 if (rnp->exp_need_flush) in sync_rcu_exp_select_cpus()
567 …pr_err("INFO: %s detected expedited stalls, but suppressed full report due to a stuck CSD-lock.\n"… in synchronize_rcu_expedited_stall()
578 if (!(READ_ONCE(rnp->expmask) & mask)) in synchronize_rcu_expedited_stall()
582 pr_cont(" %d-%c%c%c%c", cpu, in synchronize_rcu_expedited_stall()
584 "o."[!!(rdp->grpmask & rnp->expmaskinit)], in synchronize_rcu_expedited_stall()
585 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)], in synchronize_rcu_expedited_stall()
586 "D."[!!data_race(rdp->cpu_no_qs.b.exp)]); in synchronize_rcu_expedited_stall()
590 j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask), in synchronize_rcu_expedited_stall()
591 ".T"[!!data_race(rnp_root->exp_tasks)]); in synchronize_rcu_expedited_stall()
599 pr_cont(" l=%u:%d-%d:%#lx/%c", in synchronize_rcu_expedited_stall()
600 rnp->level, rnp->grplo, rnp->grphi, data_race(rnp->expmask), in synchronize_rcu_expedited_stall()
601 ".T"[!!data_race(rnp->exp_tasks)]); in synchronize_rcu_expedited_stall()
608 if (!(READ_ONCE(rnp->expmask) & mask)) in synchronize_rcu_expedited_stall()
639 mask = READ_ONCE(rnp->expmask); in synchronize_rcu_expedited_wait()
642 if (rdp->rcu_forced_tick_exp) in synchronize_rcu_expedited_wait()
644 rdp->rcu_forced_tick_exp = true; in synchronize_rcu_expedited_wait()
666 rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start)); in synchronize_rcu_expedited_wait()
679 * wake up everyone who piggybacked on the just-completed expedited
680 * grace period. Also update all the ->exp_seq_rq counters as needed
681 * in order to avoid counter-wrap problems.
697 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { in rcu_exp_wait_wake()
698 spin_lock(&rnp->exp_lock); in rcu_exp_wait_wake()
700 if (ULONG_CMP_LT(rnp->exp_seq_rq, s)) in rcu_exp_wait_wake()
701 WRITE_ONCE(rnp->exp_seq_rq, s); in rcu_exp_wait_wake()
702 spin_unlock(&rnp->exp_lock); in rcu_exp_wait_wake()
705 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]); in rcu_exp_wait_wake()
713 * workqueues and mid-boot-time tasks.
728 ASSERT_EXCLUSIVE_WRITER_SCOPED(*this_cpu_ptr(&rcu_data.cpu_no_qs.b.exp)); in rcu_exp_need_qs()
729 __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true); in rcu_exp_need_qs()
740 * RCU read-side critical section in effect, request that the
742 * ->expmask fields in the rcu_node tree. Otherwise, immediately
750 struct rcu_node *rnp = rdp->mynode; in rcu_exp_handler()
757 ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp); in rcu_exp_handler()
758 if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in rcu_exp_handler()
759 READ_ONCE(rdp->cpu_no_qs.b.exp))) in rcu_exp_handler()
763 * Second, the common case of not being in an RCU read-side in rcu_exp_handler()
777 * Third, the less-common case of being in an RCU read-side in rcu_exp_handler()
782 * grace period is still waiting on this CPU, set ->deferred_qs in rcu_exp_handler()
786 * reported, so we really do need to check ->expmask. in rcu_exp_handler()
790 if (rnp->expmask & rdp->grpmask) { in rcu_exp_handler()
791 WRITE_ONCE(rdp->cpu_no_qs.b.exp, true); in rcu_exp_handler()
792 t->rcu_read_unlock_special.b.exp_hint = true; in rcu_exp_handler()
803 * Scan the current list of tasks blocked within RCU read-side critical
814 if (!rnp->exp_tasks) { in rcu_print_task_exp_stall()
818 t = list_entry(rnp->exp_tasks->prev, in rcu_print_task_exp_stall()
820 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_exp_stall()
821 pr_cont(" P%d", t->pid); in rcu_print_task_exp_stall()
829 * Scan the current list of tasks blocked within RCU read-side critical
841 if (!READ_ONCE(rnp->exp_tasks)) { in rcu_exp_print_detail_task_stall_rnp()
845 t = list_entry(rnp->exp_tasks->prev, in rcu_exp_print_detail_task_stall_rnp()
847 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_exp_print_detail_task_stall_rnp()
860 /* Invoked on each online non-idle CPU for expedited quiescent state. */
864 struct rcu_node *rnp = rdp->mynode; in rcu_exp_handler()
867 ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp); in rcu_exp_handler()
868 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in rcu_exp_handler()
869 __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) in rcu_exp_handler()
881 * tasks blocked within RCU read-side critical sections that are
891 * tasks blocked within RCU read-side critical sections that are blocking
901 * synchronize_rcu_expedited - Brute-force RCU grace period
904 * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
907 * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
908 * other hand, if the CPU is not in an RCU read-side critical section,
912 * implementations, it is still unfriendly to real-time workloads, so is
913 * thus not recommended for any sort of common-case code. In fact, if
930 "Illegal synchronize_rcu_expedited() in RCU read-side critical section"); in synchronize_rcu_expedited()
938 // them, which allows reuse of ->gp_seq_polled_exp_snap. in synchronize_rcu_expedited()
972 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in synchronize_rcu_expedited()
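The kerneldoc above describes synchronize_rcu_expedited() as a brute-force grace period that IPIs non-idle, non-nohz CPUs and is unfriendly to real-time workloads, so it should stay out of common-case code. A hedged sketch of the unpublish-then-free pattern it is meant for; the data structure, mutex, and helper names are invented for illustration:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_config {
	int value;
};

static struct my_config __rcu *global_config;
static DEFINE_MUTEX(config_mutex);

/* Reader side: never waits on writers or on the expedited machinery. */
static int read_config_value(void)
{
	struct my_config *cfg;
	int val = -1;

	rcu_read_lock();
	cfg = rcu_dereference(global_config);
	if (cfg)
		val = cfg->value;
	rcu_read_unlock();
	return val;
}

/* Update side: unpublish the old version, wait out pre-existing readers with
 * the expedited (low-latency but IPI-heavy) grace period, then free it. */
static void replace_config(struct my_config *newcfg)
{
	struct my_config *old;

	mutex_lock(&config_mutex);
	old = rcu_dereference_protected(global_config,
					lockdep_is_held(&config_mutex));
	rcu_assign_pointer(global_config, newcfg);
	mutex_unlock(&config_mutex);

	synchronize_rcu_expedited();	/* all readers that saw "old" are done */
	kfree(old);
}

As the comment in the listing says, plain synchronize_rcu() (or call_rcu()) is usually the better choice unless the latency of the grace period itself is the problem.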
991 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
992 s = rnp->exp_seq_poll_rq; in sync_rcu_do_polled_gp()
993 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; in sync_rcu_do_polled_gp()
994 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
1003 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
1004 s = rnp->exp_seq_poll_rq; in sync_rcu_do_polled_gp()
1006 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; in sync_rcu_do_polled_gp()
1007 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
1011 …* start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace peri…
1028 rnp = rdp->mynode; in start_poll_synchronize_rcu_expedited()
1030 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags); in start_poll_synchronize_rcu_expedited()
1033 rnp->exp_seq_poll_rq = s; in start_poll_synchronize_rcu_expedited()
1034 queue_work(rcu_gp_wq, &rnp->exp_poll_wq); in start_poll_synchronize_rcu_expedited()
1038 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags); in start_poll_synchronize_rcu_expedited()
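start_poll_synchronize_rcu_expedited() records a cookie for the current grace-period state and, if needed, kicks off an expedited grace period without blocking the caller; the cookie can later be handed to poll_state_synchronize_rcu() or to cond_synchronize_rcu_expedited(), documented a few lines further down. A hedged sketch of that two-phase pattern, with the surrounding helpers invented for illustration:

#include <linux/rcupdate.h>

static unsigned long retire_cookie;

/* Phase 1: unpublish the object and start, but do not wait for, an
 * expedited grace period. */
static void retire_object(void)
{
	/* ... remove the object from all RCU-visible structures ... */
	retire_cookie = start_poll_synchronize_rcu_expedited();
}

/* Phase 2: free it once the grace period recorded in the cookie is over. */
static void free_object(void)
{
	/*
	 * poll_state_synchronize_rcu() would let us test without blocking;
	 * cond_synchronize_rcu_expedited() blocks only if the grace period
	 * started in phase 1 has not yet completed.
	 */
	cond_synchronize_rcu_expedited(retire_cookie);
	/* ... now safe to free ... */
}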
1045 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
1046 * @rgosp: Place to put snapshot of grace-period state
1048 * Places the normal and expedited grace-period states in rgosp. This
1063 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
1074 * more than 2 billion grace periods (and way more on a 64-bit system!),
1077 * This function provides the same memory-ordering guarantees that
1090 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
1101 * more than 2 billion grace periods (and way more on a 64-bit system!),
1104 * This function provides the same memory-ordering guarantees that
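The _full variants carry both the normal and expedited grace-period state in a struct rcu_gp_oldstate, which sidesteps the (mostly theoretical) two-billion-grace-period wrap caveat mentioned above for the unsigned long cookie. A hedged sketch along the same lines, with the embedding structure and helpers invented for illustration:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_gp_oldstate gp_state;
	/* ... payload ... */
};

/* Record full grace-period state and start an expedited grace period. */
static void my_obj_retire(struct my_obj *p)
{
	/* ... unpublish p from RCU-visible structures first ... */
	start_poll_synchronize_rcu_expedited_full(&p->gp_state);
}

/* Block only if the grace period recorded at retire time has not yet ended. */
static void my_obj_free(struct my_obj *p)
{
	cond_synchronize_rcu_expedited_full(&p->gp_state);
	kfree(p);
}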