/linux/kernel/rcu/
tree_exp.h:
     14: static int rcu_print_task_exp_stall(struct rcu_node *rnp);
     15: static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);
     84: struct rcu_node *rnp;  in sync_exp_reset_tree_hotplug() local
     96: rcu_for_each_leaf_node(rnp) {  in sync_exp_reset_tree_hotplug()
     97: raw_spin_lock_irqsave_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
     98: if (rnp->expmaskinit == rnp->expmaskinitnext) {  in sync_exp_reset_tree_hotplug()
     99: raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
    104: oldmask = rnp->expmaskinit;  in sync_exp_reset_tree_hotplug()
    105: rnp->expmaskinit = rnp->expmaskinitnext;  in sync_exp_reset_tree_hotplug()
    106: raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
    [all …]
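The sync_exp_reset_tree_hotplug() lines above show the per-leaf refresh of the expedited-grace-period CPU masks: each leaf is locked, skipped if no CPU came or went since the last refresh, and otherwise has ->expmaskinitnext (maintained at CPU-hotplug time) copied into ->expmaskinit (consumed when an expedited grace period starts). A minimal standalone sketch of that compare-and-copy step, using toy types rather than the kernel's:

#include <stdbool.h>

/* Toy stand-in for the two expedited masks in struct rcu_node. */
struct toy_rcu_node {
        unsigned long expmaskinit;      /* Consumed at expedited-GP start. */
        unsigned long expmaskinitnext;  /* Updated at CPU-hotplug time. */
};

/*
 * The kernel calls the equivalent of this with the node lock held and
 * IRQs off; returns true if a hotplug event forced a mask refresh.
 */
static bool toy_exp_reset_one(struct toy_rcu_node *rnp)
{
        if (rnp->expmaskinit == rnp->expmaskinitnext)
                return false;           /* No CPU came or went; skip. */
        rnp->expmaskinit = rnp->expmaskinitnext;
        return true;
}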
tree_plugin.h:
    116: static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
    162: static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_preempt_ctxt_queue() argument
    163: __releases(rnp->lock) /* But leaves rrupts disabled. */  in rcu_preempt_ctxt_queue()
    165: int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +  in rcu_preempt_ctxt_queue()
    166: (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +  in rcu_preempt_ctxt_queue()
    167: (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +  in rcu_preempt_ctxt_queue()
    168: (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);  in rcu_preempt_ctxt_queue()
    171: raw_lockdep_assert_held_rcu_node(rnp);  in rcu_preempt_ctxt_queue()
    172: WARN_ON_ONCE(rdp->mynode != rnp);  in rcu_preempt_ctxt_queue()
    173: WARN_ON_ONCE(!rcu_is_leaf_node(rnp));  in rcu_preempt_ctxt_queue()
    [all …]
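Lines 165-168 fold four independent facts about a preempted reader into one small integer: whether GP-blocking and expedited-blocking tasks are already queued on the node, and whether this CPU still owes the normal and the expedited grace period a quiescent state. rcu_preempt_ctxt_queue() then switches on that value to pick the task's position in the blocked-tasks list. A sketch of the encoding, with the bit values tree_plugin.h uses:

#include <stdbool.h>

#define RCU_GP_TASKS    0x8     /* GP-blocking tasks already queued on node. */
#define RCU_EXP_TASKS   0x4     /* Expedited-blocking tasks already queued. */
#define RCU_GP_BLKD     0x2     /* This task itself blocks the normal GP. */
#define RCU_EXP_BLKD    0x1     /* This task itself blocks the expedited GP. */

/* Fold the four conditions into the switchable blkd_state value. */
static int toy_blkd_state(bool gp_tasks_queued, bool exp_tasks_queued,
                          bool blocks_gp, bool blocks_exp)
{
        return (gp_tasks_queued  ? RCU_GP_TASKS  : 0) +
               (exp_tasks_queued ? RCU_EXP_TASKS : 0) +
               (blocks_gp        ? RCU_GP_BLKD   : 0) +
               (blocks_exp       ? RCU_EXP_BLKD  : 0);
}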
tree.c:
    150: static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
    152: static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
    156: static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
    768: static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_gpnum_ovf() argument
    770: raw_lockdep_assert_held_rcu_node(rnp);  in rcu_gpnum_ovf()
    772: rnp->gp_seq))  in rcu_gpnum_ovf()
    774: if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))  in rcu_gpnum_ovf()
    775: rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;  in rcu_gpnum_ovf()
    790: * rnp locking tree since rcu_gp_init() and up to the current leaf rnp  in rcu_watching_snap_save()
    819: struct rcu_node *rnp = rdp->mynode;  in rcu_watching_snap_recheck() local
    952: trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, unsigned long gp_seq_req, const char *s)  in trace_rcu_this_gp() argument
    980: struct rcu_node *rnp;  in rcu_start_this_gp() local
   1051: rcu_future_gp_cleanup(struct rcu_node *rnp)  in rcu_future_gp_cleanup() argument
   1135: rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_accelerate_cbs() argument
   1181: rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_accelerate_cbs_unlocked() argument
   1211: rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_advance_cbs() argument
   1234: rcu_advance_cbs_nowake(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_advance_cbs_nowake() argument
   1265: __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)  in __note_gp_changes() argument
   1318: struct rcu_node *rnp;  in note_gp_changes() local
   1406: struct rcu_node *rnp = rcu_get_root();  in rcu_poll_gp_seq_start() local
   1422: struct rcu_node *rnp = rcu_get_root();  in rcu_poll_gp_seq_end() local
   1444: struct rcu_node *rnp = rcu_get_root();  in rcu_poll_gp_seq_start_unlocked() local
   1461: struct rcu_node *rnp = rcu_get_root();  in rcu_poll_gp_seq_end_unlocked() local
   1802: struct rcu_node *rnp = rcu_get_root();  in rcu_gp_init() local
   1957: struct rcu_node *rnp = rcu_get_root();  in rcu_gp_fqs_check_wake() local
   1981: struct rcu_node *rnp = rcu_get_root();  in rcu_gp_fqs() local
   2020: struct rcu_node *rnp = rcu_get_root();  in rcu_gp_fqs_loop() local
   2108: struct rcu_node *rnp = rcu_get_root();  in rcu_gp_cleanup() local
   2289: rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, unsigned long gps, unsigned long flags)  in rcu_report_qs_rnp() argument
   2354: rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)  in rcu_report_unblock_qs_rnp() argument
   2397: struct rcu_node *rnp;  in rcu_report_qs_rdp() local
   2688: struct rcu_node *rnp;  in force_qs_rnp() local
   2747: struct rcu_node *rnp;  in rcu_force_quiescent_state() local
   2791: struct rcu_node *rnp = rdp->mynode;  in rcu_core() local
   3024: check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)  in check_cb_ovld_locked() argument
   3049: struct rcu_node *const rnp = rdp->mynode;  in check_cb_ovld() local
   4086: struct rcu_node *rnp;  in synchronize_rcu() local
   4173: struct rcu_node *rnp = rcu_get_root();  in get_state_synchronize_rcu_full() local
   4194: struct rcu_node *rnp;  in start_poll_synchronize_rcu_common() local
   4335: struct rcu_node *rnp = rcu_get_root();  in poll_state_synchronize_rcu_full() local
   4412: struct rcu_node *rnp = rdp->mynode;  in rcu_pending() local
   4728: rcu_rnp_online_cpus(struct rcu_node *rnp)  in rcu_rnp_online_cpus() argument
   4817: struct rcu_node *rnp = rnp_leaf;  in rcu_cleanup_dead_rnp() local
   4852: struct rcu_node *rnp = rnp_leaf;  in rcu_init_new_rnp() local
   4896: rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)  in rcu_spawn_exp_par_gp_kworker() argument
   4918: rcu_exp_par_gp_task(struct rcu_node *rnp)  in rcu_exp_par_gp_task() argument
   4944: rcu_spawn_rnp_kthreads(struct rcu_node *rnp)  in rcu_spawn_rnp_kthreads() argument
   4969: struct rcu_node *rnp = rcu_get_root();  in rcutree_prepare_cpu() local
   5028: struct rcu_node *rnp;  in rcutree_affinity_setting() local
   5089: struct rcu_node *rnp;  in rcutree_online_cpu() local
   5124: struct rcu_node *rnp;  in rcutree_report_cpu_starting() local
   5182: struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */  in rcutree_report_cpu_dead() local
   5292: struct rcu_node *rnp = rdp->mynode;  in rcutree_dying_cpu() local
   5308: struct rcu_node *rnp;  in rcutree_offline_cpu() local
   5354: struct rcu_node *rnp;  in rcu_spawn_gp_kthread() local
   5403: struct rcu_node *rnp;  in rcu_scheduler_starting() local
   5434: struct rcu_node *rnp;  in rcu_init_one() local
   5631: struct rcu_node *rnp;  in rcu_dump_rcu_node_tree() local
   [all …]
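Lines 774-775 (rcu_gpnum_ovf()) guard against ->gp_seq wrapping past a stale per-CPU snapshot: grace-period counters are free-running unsigned longs, so "older than" must be judged modulo wraparound, which is what ULONG_CMP_LT() does. A standalone sketch, with ULONG_CMP_LT() written out as kernel/rcu/rcu.h defines it:

#include <limits.h>

/* True iff a < b in modular (wraparound-safe) unsigned arithmetic. */
#define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

/*
 * If the irq-work GP snapshot has fallen more than ULONG_MAX / 4
 * grace periods behind the current counter, resynchronize it (as in
 * the excerpt, parking it ULONG_MAX / 4 ahead) so later comparisons
 * cannot be confused by counter wrap.
 */
static void toy_gpnum_ovf(unsigned long gp_seq, unsigned long *rcu_iw_gp_seq)
{
        if (ULONG_CMP_LT(*rcu_iw_gp_seq + ULONG_MAX / 4, gp_seq))
                *rcu_iw_gp_seq = gp_seq + ULONG_MAX / 4;
}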
tree_stall.h:
    224: struct rcu_node *rnp;  in rcu_iw_handler() local
    227: rnp = rdp->mynode;  in rcu_iw_handler()
    228: raw_spin_lock_rcu_node(rnp);  in rcu_iw_handler()
    230: rdp->rcu_iw_gp_seq = rnp->gp_seq;  in rcu_iw_handler()
    233: raw_spin_unlock_rcu_node(rnp);  in rcu_iw_handler()
    246: static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)  in rcu_print_detail_task_stall_rnp() argument
    251: raw_spin_lock_irqsave_rcu_node(rnp, flags);  in rcu_print_detail_task_stall_rnp()
    252: if (!rcu_preempt_blocked_readers_cgp(rnp)) {  in rcu_print_detail_task_stall_rnp()
    253: raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in rcu_print_detail_task_stall_rnp()
    256: t = list_entry(rnp->gp_tasks->prev,  in rcu_print_detail_task_stall_rnp()
    [all …]
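The rcu_iw_handler() fragment (lines 224-233) is the stall detector's proof of life: an irq_work handler runs on the suspect CPU and, under the node lock, copies the node's current ->gp_seq into the per-CPU ->rcu_iw_gp_seq, demonstrating that the CPU still takes interrupts. A userspace sketch of the same publish-under-lock shape, with a pthread mutex standing in for the rcu_node lock:

#include <pthread.h>

struct toy_node {
        pthread_mutex_t lock;           /* Stands in for rnp->lock. */
        unsigned long gp_seq;           /* Current grace-period number. */
};

struct toy_cpu_data {
        struct toy_node *mynode;        /* Leaf node covering this CPU. */
        unsigned long rcu_iw_gp_seq;    /* Last GP this CPU proved alive in. */
};

/* The kernel analogue runs from irq_work context on the suspect CPU. */
static void toy_iw_handler(struct toy_cpu_data *rdp)
{
        struct toy_node *rnp = rdp->mynode;

        pthread_mutex_lock(&rnp->lock);
        rdp->rcu_iw_gp_seq = rnp->gp_seq;       /* "Still taking IRQs." */
        pthread_mutex_unlock(&rnp->lock);
}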
rcu.h:
    382: #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
    385: #define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
    392: #define _rcu_for_each_node_breadth_first(sp, rnp) \
    393:     for ((rnp) = &(sp)->node[0]; \
    394:          (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
    395: #define rcu_for_each_node_breadth_first(rnp) \
    396:     _rcu_for_each_node_breadth_first(&rcu_state, rnp)
    397: #define srcu_for_each_node_breadth_first(ssp, rnp) \
    398:     _rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)
    406: #define rcu_for_each_leaf_node(rnp) \
    [all …]
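These traversal macros depend on layout, not on per-node child pointers: the ->node[] array stores the root at index 0 followed by each successive level in order, so a plain linear scan of the array is a breadth-first walk. A toy rendering of that idea (sizes and types are illustrative only):

#define TOY_NUM_NODES   5               /* One root plus four leaves. */

struct toy_node { int level; };         /* 0 = root row, 1 = leaf row. */

struct toy_state {
        /* Breadth-first layout: node[0] is the root, leaves follow. */
        struct toy_node node[TOY_NUM_NODES];
};

#define toy_for_each_node_breadth_first(sp, rnp) \
        for ((rnp) = &(sp)->node[0]; \
             (rnp) < &(sp)->node[TOY_NUM_NODES]; (rnp)++)

/* Linear scan == breadth-first walk: shallower levels come first. */
static int toy_count_leaves(struct toy_state *sp)
{
        struct toy_node *rnp;
        int n = 0;

        toy_for_each_node_breadth_first(sp, rnp)
                n += (rnp->level == 1);
        return n;
}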
tree.h:
    146: #define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
    471: static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
    473: static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
    475: static int rcu_print_task_exp_stall(struct rcu_node *rnp);
    476: static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
    478: static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
    479: static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
    480: static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
    483: static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
    484: static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
    [all …]
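leaf_node_cpu_bit() (line 146) is how per-CPU state maps onto the leaf bitmasks: each leaf covers a contiguous CPU range starting at ->grplo, so a CPU's bit in masks such as ->qsmask is just its offset within that range. A sketch with BIT() expanded by hand:

/* Toy leaf covering the contiguous CPU range [grplo, grphi]. */
struct toy_leaf { int grplo, grphi; };

/* Equivalent of BIT((cpu) - (rnp)->grplo). */
static unsigned long toy_leaf_cpu_bit(const struct toy_leaf *rnp, int cpu)
{
        return 1UL << (cpu - rnp->grplo);
}

/*
 * Example: a leaf with grplo == 16 covers CPUs 16..31, so CPU 18 owns
 * bit 2 (mask 0x4) in that leaf's masks.
 */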
tree_nocb.h:
    182: static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)  in rcu_nocb_gp_get() argument
    184: return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];  in rcu_nocb_gp_get()
    187: static void rcu_init_one_nocb(struct rcu_node *rnp)  in rcu_init_one_nocb() argument
    189: init_swait_queue_head(&rnp->nocb_gp_wq[0]);  in rcu_init_one_nocb()
    190: init_swait_queue_head(&rnp->nocb_gp_wq[1]);  in rcu_init_one_nocb()
    667: struct rcu_node *rnp;  in nocb_gp_wait() local
    730: rnp = rdp->mynode;  in nocb_gp_wait()
    737: rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {  in nocb_gp_wait()
    738: raw_spin_lock_rcu_node(rnp); /* irqs disabled. */  in nocb_gp_wait()
    739: needwake_gp = rcu_advance_cbs(rnp, rdp);  in nocb_gp_wait()
    [all …]
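Line 184 explains why each rcu_node carries two no-CBs wait queues: the low bits of ->gp_seq encode grace-period phase, rcu_seq_ctr() shifts them off to recover the grace-period counter, and the counter's low bit alternates between the two queues so that waiters on grace period N and on N+1 never share one. A sketch, with RCU_SEQ_CTR_SHIFT as kernel/rcu/rcu.h defines it and a stand-in queue type:

#define RCU_SEQ_CTR_SHIFT       2       /* Low two bits of gp_seq = GP phase. */

/* Strip the phase bits, leaving the free-running GP counter. */
static inline unsigned long toy_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

struct toy_wait_queue { int waiters; }; /* Stands in for swait_queue_head. */

struct toy_node {
        unsigned long gp_seq;
        struct toy_wait_queue nocb_gp_wq[2];    /* Even GPs / odd GPs. */
};

/* Pick the queue for the grace period that gp_seq currently denotes. */
static struct toy_wait_queue *toy_nocb_gp_get(struct toy_node *rnp)
{
        return &rnp->nocb_gp_wq[toy_seq_ctr(rnp->gp_seq) & 0x1];
}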
/linux/Documentation/RCU/Design/Memory-Ordering/
Tree-RCU-Memory-Ordering.rst:
(The first number is the line in the .rst file; the second is the quoted code listing's own numbering inside the document.)
     84:  5 raw_spin_lock_rcu_node(rnp);
     87:  8 raw_spin_unlock_rcu_node(rnp);
     92: 13 raw_spin_lock_rcu_node(rnp);
     95: 16 raw_spin_unlock_rcu_node(rnp);
    206:  5 struct rcu_node *rnp;
    232: 31 rnp = rdp->mynode;
    233: 32 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
    234: 33 needwake = rcu_accelerate_cbs(rnp, rdp);
    235: 34 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
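The listings quoted above rely on raw_spin_lock_rcu_node() being more than a plain lock acquisition: as this document explains, it pairs the acquisition with smp_mb__after_unlock_lock(), so that unlocking one rcu_node's lock and then locking another's orders like a full memory barrier across the grace-period computation. Schematically, in kernel context (the real macro also wraps the ->lock field in ACCESS_PRIVATE()):

/* Acquire an rcu_node lock with full-barrier semantics vs. prior unlocks. */
#define raw_spin_lock_rcu_node(rnp)                                     \
do {                                                                    \
        raw_spin_lock(&(rnp)->lock);                                    \
        smp_mb__after_unlock_lock(); /* Promote unlock+lock to full barrier. */ \
} while (0)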
/linux/Documentation/RCU/Design/Data-Structures/
Data-Structures.rst:
(These are the older two-argument forms of the rcu.h traversal macros above, from before the single-rcu_state conversion; the second number is the quoted listing's own numbering.)
   1106:  6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
   1107:  7     for ((rnp) = &(rsp)->node[0]; \
   1108:  8          (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
   1110: 10 #define rcu_for_each_leaf_node(rsp, rnp) \
   1111: 11     for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
   1112: 12          (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)