/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1        (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2        (RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3        (RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4        (RCU_FANOUT_3 * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
# define RCU_NUM_LVLS       1
# define NUM_RCU_LVL_0      1
# define NUM_RCU_NODES      NUM_RCU_LVL_0
# define NUM_RCU_LVL_INIT   { NUM_RCU_LVL_0 }
# define RCU_NODE_NAME_INIT { "rcu_node_0" }
# define RCU_FQS_NAME_INIT  { "rcu_node_fqs_0" }
# define RCU_EXP_NAME_INIT  { "rcu_node_exp_0" }
#elif NR_CPUS <= RCU_FANOUT_2
# define RCU_NUM_LVLS       2
# define NUM_RCU_LVL_0      1
# define NUM_RCU_LVL_1      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
# define NUM_RCU_LVL_INIT   { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
# define RCU_FQS_NAME_INIT  { "rcu_node_fqs_0", "rcu_node_fqs_1" }
# define RCU_EXP_NAME_INIT  { "rcu_node_exp_0", "rcu_node_exp_1" }
#elif NR_CPUS <= RCU_FANOUT_3
# define RCU_NUM_LVLS       3
# define NUM_RCU_LVL_0      1
# define NUM_RCU_LVL_1      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
# define NUM_RCU_LVL_INIT   { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
# define RCU_FQS_NAME_INIT  { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
# define RCU_EXP_NAME_INIT  { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
#elif NR_CPUS <= RCU_FANOUT_4
# define RCU_NUM_LVLS       4
# define NUM_RCU_LVL_0      1
# define NUM_RCU_LVL_1      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_3      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
# define NUM_RCU_LVL_INIT   { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
# define RCU_FQS_NAME_INIT  { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
# define RCU_EXP_NAME_INIT  { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

extern int rcu_num_lvls;
extern int rcu_num_nodes;
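
/*
 * Illustrative arithmetic (an editorial example, not part of the original
 * header): with NR_CPUS = 4096 and the 64-bit defaults
 * RCU_FANOUT = RCU_FANOUT_LEAF = 64, we get RCU_FANOUT_1 = 64 and
 * RCU_FANOUT_2 = 4096, so the "NR_CPUS <= RCU_FANOUT_2" case above
 * selects a two-level tree:
 *
 *	NUM_RCU_LVL_0 = 1                             // the root
 *	NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 64) = 64   // the leaves
 *	NUM_RCU_NODES = 65
 *
 * Each leaf rcu_node then covers RCU_FANOUT_LEAF = 64 CPUs.
 */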
"rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" } 92 #elif NR_CPUS <= RCU_FANOUT_4 93 # define RCU_NUM_LVLS 4 94 # define NUM_RCU_LVL_0 1 95 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) 96 # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) 97 # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) 98 # define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) 99 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 } 100 # define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" } 101 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" } 102 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" } 103 #else 104 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" 105 #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ 106 107 extern int rcu_num_lvls; 108 extern int rcu_num_nodes; 109 110 /* 111 * Dynticks per-CPU state. 112 */ 113 struct rcu_dynticks { 114 long long dynticks_nesting; /* Track irq/process nesting level. */ 115 /* Process level is worth LLONG_MAX/2. */ 116 int dynticks_nmi_nesting; /* Track NMI nesting level. */ 117 atomic_t dynticks; /* Even value for idle, else odd. */ 118 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE 119 long long dynticks_idle_nesting; 120 /* irq/process nesting level from idle. */ 121 atomic_t dynticks_idle; /* Even value for idle, else odd. */ 122 /* "Idle" excludes userspace execution. */ 123 unsigned long dynticks_idle_jiffies; 124 /* End of last non-NMI non-idle period. */ 125 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 126 #ifdef CONFIG_RCU_FAST_NO_HZ 127 bool all_lazy; /* Are all CPU's CBs lazy? */ 128 unsigned long nonlazy_posted; 129 /* # times non-lazy CBs posted to CPU. */ 130 unsigned long nonlazy_posted_snap; 131 /* idle-period nonlazy_posted snapshot. */ 132 unsigned long last_accelerate; 133 /* Last jiffy CBs were accelerated. */ 134 unsigned long last_advance_all; 135 /* Last jiffy CBs were all advanced. */ 136 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ 137 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 138 }; 139 140 /* RCU's kthread states for tracing. */ 141 #define RCU_KTHREAD_STOPPED 0 142 #define RCU_KTHREAD_RUNNING 1 143 #define RCU_KTHREAD_WAITING 2 144 #define RCU_KTHREAD_OFFCPU 3 145 #define RCU_KTHREAD_YIELDING 4 146 #define RCU_KTHREAD_MAX 4 147 148 /* 149 * Definition for node within the RCU grace-period-detection hierarchy. 150 */ 151 struct rcu_node { 152 raw_spinlock_t lock; /* Root rcu_node's lock protects some */ 153 /* rcu_state fields as well as following. */ 154 unsigned long gpnum; /* Current grace period for this node. */ 155 /* This will either be equal to or one */ 156 /* behind the root rcu_node's gpnum. */ 157 unsigned long completed; /* Last GP completed for this node. */ 158 /* This will either be equal to or one */ 159 /* behind the root rcu_node's gpnum. */ 160 unsigned long qsmask; /* CPUs or groups that need to switch in */ 161 /* order for current grace period to proceed.*/ 162 /* In leaf rcu_node, each bit corresponds to */ 163 /* an rcu_data structure, otherwise, each */ 164 /* bit corresponds to a child rcu_node */ 165 /* structure. */ 166 unsigned long qsmaskinit; 167 /* Per-GP initial value for qsmask. */ 168 /* Initialized from ->qsmaskinitnext at the */ 169 /* beginning of each grace period. */ 170 unsigned long qsmaskinitnext; 171 /* Online CPUs for next grace period. 

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
        raw_spinlock_t lock;            /* Root rcu_node's lock protects some */
                                        /*  rcu_state fields as well as following. */
        unsigned long gpnum;            /* Current grace period for this node. */
                                        /*  This will either be equal to or one */
                                        /*  behind the root rcu_node's gpnum. */
        unsigned long completed;        /* Last GP completed for this node. */
                                        /*  This will either be equal to or one */
                                        /*  behind the root rcu_node's gpnum. */
        unsigned long qsmask;           /* CPUs or groups that need to switch in */
                                        /*  order for current grace period to proceed. */
                                        /*  In leaf rcu_node, each bit corresponds to */
                                        /*  an rcu_data structure, otherwise, each */
                                        /*  bit corresponds to a child rcu_node */
                                        /*  structure. */
        unsigned long qsmaskinit;
                                        /* Per-GP initial value for qsmask. */
                                        /*  Initialized from ->qsmaskinitnext at the */
                                        /*  beginning of each grace period. */
        unsigned long qsmaskinitnext;
                                        /* Online CPUs for next grace period. */
        unsigned long expmask;          /* CPUs or groups that need to check in */
                                        /*  to allow the current expedited GP */
                                        /*  to complete. */
        unsigned long expmaskinit;
                                        /* Per-GP initial values for expmask. */
                                        /*  Initialized from ->expmaskinitnext at the */
                                        /*  beginning of each expedited GP. */
        unsigned long expmaskinitnext;
                                        /* Online CPUs for next expedited GP. */
                                        /*  Any CPU that has ever been online will */
                                        /*  have its bit set. */
        unsigned long grpmask;          /* Mask to apply to parent qsmask. */
                                        /*  Only one bit will be set in this mask. */
        int grplo;                      /* lowest-numbered CPU or group here. */
        int grphi;                      /* highest-numbered CPU or group here. */
        u8 grpnum;                      /* CPU/group number for next level up. */
        u8 level;                       /* root is at level 0. */
        bool wait_blkd_tasks;           /* Necessary to wait for blocked tasks to */
                                        /*  exit RCU read-side critical sections */
                                        /*  before propagating offline up the */
                                        /*  rcu_node tree? */
        struct rcu_node *parent;
        struct list_head blkd_tasks;
                                        /* Tasks blocked in RCU read-side critical */
                                        /*  section.  Tasks are placed at the head */
                                        /*  of this list and age towards the tail. */
        struct list_head *gp_tasks;
                                        /* Pointer to the first task blocking the */
                                        /*  current grace period, or NULL if there */
                                        /*  is no such task. */
        struct list_head *exp_tasks;
                                        /* Pointer to the first task blocking the */
                                        /*  current expedited grace period, or NULL */
                                        /*  if there is no such task.  If there */
                                        /*  is no current expedited grace period, */
                                        /*  then there cannot be any such task. */
        struct list_head *boost_tasks;
                                        /* Pointer to first task that needs to be */
                                        /*  priority boosted, or NULL if no priority */
                                        /*  boosting is needed for this rcu_node */
                                        /*  structure.  If there are no tasks */
                                        /*  queued on this rcu_node structure that */
                                        /*  are blocking the current grace period, */
                                        /*  there can be no such task. */
        struct rt_mutex boost_mtx;
                                        /* Used only for the priority-boosting */
                                        /*  side effect, not as a lock. */
        unsigned long boost_time;
                                        /* When to start boosting (jiffies). */
        struct task_struct *boost_kthread_task;
                                        /* kthread that takes care of priority */
                                        /*  boosting for this rcu_node structure. */
        unsigned int boost_kthread_status;
                                        /* State of boost_kthread_task for tracing. */
        unsigned long n_tasks_boosted;
                                        /* Total number of tasks boosted. */
        unsigned long n_exp_boosts;
                                        /* Number of tasks boosted for expedited GP. */
        unsigned long n_normal_boosts;
                                        /* Number of tasks boosted for normal GP. */
        unsigned long n_balk_blkd_tasks;
                                        /* Refused to boost: no blocked tasks. */
        unsigned long n_balk_exp_gp_tasks;
                                        /* Refused to boost: nothing blocking GP. */
        unsigned long n_balk_boost_tasks;
                                        /* Refused to boost: already boosting. */
        unsigned long n_balk_notblocked;
                                        /* Refused to boost: RCU RS CS still running. */
        unsigned long n_balk_notyet;
                                        /* Refused to boost: not yet time. */
        unsigned long n_balk_nos;
                                        /* Refused to boost: not sure why, though. */
                                        /*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
        wait_queue_head_t nocb_gp_wq[2];
                                        /* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
        int need_future_gp[2];
                                        /* Counts of upcoming no-CB GP requests. */
        raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

        struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;
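
/*
 * Illustrative sketch of how ->grpmask and ->qsmask cooperate (a
 * simplified rendering of tree.c's rcu_report_qs_rnp(), ignoring
 * locking and blocked readers; not a definition made here).  Once
 * everything an rcu_node tracks has passed through a quiescent state,
 * its single bit is cleared in its parent, walking toward the root:
 *
 *	rnp->qsmask &= ~mask;		// report QS for child/CPU "mask"
 *	if (!rnp->qsmask && rnp->parent) {
 *		mask = rnp->grpmask;	// this node's one bit in the parent
 *		rnp = rnp->parent;	// continue one level up
 *	}
 *
 * The grace period can end once the root's ->qsmask reaches zero.
 */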

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
        for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
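
/*
 * Example usage of the scan macros above (illustrative only; "rsp" and
 * "rnp" are locals in the caller):
 *
 *	struct rcu_node *rnp;
 *
 *	rcu_for_each_leaf_node(rsp, rnp) {
 *		// examine one leaf, e.g. rnp->qsmask, covering
 *		// CPUs rnp->grplo through rnp->grphi
 *	}
 */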

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
        struct {
                u8 norm;
                u8 exp;
        } b;            /* Bits. */
        u16 s;          /* Set of bits, aggregate OR here. */
};

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL       0   /* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL       1   /* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL 2   /* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL       3
#define RCU_NEXT_SIZE       4

/* Per-CPU data for read-copy update. */
struct rcu_data {
        /* 1) quiescent-state and grace-period handling: */
        unsigned long completed;        /* Track rsp->completed gp number */
                                        /*  in order to detect GP end. */
        unsigned long gpnum;            /* Highest gp number that this CPU */
                                        /*  is aware of having started. */
        unsigned long rcu_qs_ctr_snap;  /* Snapshot of rcu_qs_ctr to check */
                                        /*  for rcu_all_qs() invocations. */
        union rcu_noqs cpu_no_qs;       /* No QSes yet for this CPU. */
        bool core_needs_qs;             /* Core waits for quiescent state. */
        bool beenonline;                /* CPU online at least once. */
        bool gpwrap;                    /* Possible gpnum/completed wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy. */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
        unsigned long ticks_this_gp;    /* The number of scheduling-clock */
                                        /*  ticks this CPU has handled */
                                        /*  during and after the last grace */
                                        /*  period it is aware of. */

        /* 2) batch handling */
        /*
         * If nxtlist is not NULL, it is partitioned as follows.
         * Any of the partitions might be empty, in which case the
         * pointer to that partition will be equal to the pointer for
         * the following partition.  When the list is empty, all of
         * the nxttail elements point to the ->nxtlist pointer itself,
         * which in that case is NULL.
         *
         * [nxtlist, *nxttail[RCU_DONE_TAIL]):
         *	Entries that batch # <= ->completed
         *	The grace period for these entries has completed, and
         *	the other grace-period-completed entries may be moved
         *	here temporarily in rcu_process_callbacks().
         * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
         *	Entries that batch # <= ->completed - 1: waiting for current GP
         * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
         *	Entries known to have arrived before current GP ended
         * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
         *	Entries that might have arrived after current GP ended
         *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
         *	always be NULL, as this is the end of the list.
         */
        struct rcu_head *nxtlist;
        struct rcu_head **nxttail[RCU_NEXT_SIZE];
        unsigned long nxtcompleted[RCU_NEXT_SIZE];
                                        /* grace periods for sublists. */
        long qlen_lazy;                 /* # of lazy queued callbacks */
        long qlen;                      /* # of queued callbacks, incl lazy */
        long qlen_last_fqs_check;
                                        /* qlen at last check for QS forcing */
        unsigned long n_cbs_invoked;    /* count of RCU cbs invoked. */
        unsigned long n_nocbs_invoked;  /* count of no-CBs RCU cbs invoked. */
        unsigned long n_cbs_orphaned;   /* RCU cbs orphaned by dying CPU */
        unsigned long n_cbs_adopted;    /* RCU cbs adopted from dying CPU */
        unsigned long n_force_qs_snap;
                                        /* did other CPU force QS recently? */
        long blimit;                    /* Upper limit on a processed batch */

        /* 3) dynticks interface. */
        struct rcu_dynticks *dynticks;  /* Shared per-CPU dynticks state. */
        int dynticks_snap;              /* Per-GP tracking for dynticks. */

        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
        unsigned long offline_fqs;      /* Kicked due to being offline. */
        unsigned long cond_resched_completed;
                                        /* Grace period that needs help */
                                        /*  from cond_resched(). */

        /* 5) __rcu_pending() statistics. */
        unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
        unsigned long n_rp_core_needs_qs;
        unsigned long n_rp_report_qs;
        unsigned long n_rp_cb_ready;
        unsigned long n_rp_cpu_needs_gp;
        unsigned long n_rp_gp_completed;
        unsigned long n_rp_gp_started;
        unsigned long n_rp_nocb_defer_wakeup;
        unsigned long n_rp_need_nothing;

        /* 6) _rcu_barrier(), OOM callbacks, and expediting. */
        struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
        struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
        struct mutex exp_funnel_mutex;
        atomic_long_t expedited_workdone0;      /* # done by others #0. */
        atomic_long_t expedited_workdone1;      /* # done by others #1. */
        atomic_long_t expedited_workdone2;      /* # done by others #2. */
        atomic_long_t expedited_workdone3;      /* # done by others #3. */

        /* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
        struct rcu_head *nocb_head;     /* CBs waiting for kthread. */
        struct rcu_head **nocb_tail;
        atomic_long_t nocb_q_count;     /* # CBs waiting for nocb */
        atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
        struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
        struct rcu_head **nocb_follower_tail;
        wait_queue_head_t nocb_wq;      /* For nocb kthreads to sleep on. */
        struct task_struct *nocb_kthread;
        int nocb_defer_wakeup;          /* Defer wakeup of nocb_kthread. */

        /* The following fields are used by the leader, hence its own cacheline. */
        struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
                                        /* CBs waiting for GP. */
        struct rcu_head **nocb_gp_tail;
        bool nocb_leader_sleep;         /* Is the nocb leader thread asleep? */
        struct rcu_data *nocb_next_follower;
                                        /* Next follower in wakeup chain. */

        /* The following fields are used by the follower, hence a new cacheline. */
        struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
                                        /* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

        /* 8) RCU CPU stall data. */
        unsigned int softirq_snap;      /* Snapshot of softirq activity. */

        int cpu;
        struct rcu_state *rsp;
};
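
/*
 * Illustrative sketch of the ->nxttail tail-pointer scheme described
 * above (a simplified rendering of what tree.c's __call_rcu() does,
 * not a definition made here).  Enqueuing a callback "head" appends it
 * to the RCU_NEXT_TAIL sublist:
 *
 *	*rdp->nxttail[RCU_NEXT_TAIL] = head;
 *	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 *
 * When the list is empty, every rdp->nxttail[i] points at &rdp->nxtlist
 * itself, so the same two statements also handle the empty case.
 */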

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT   0
#define RCU_NOGP_WAKE       1
#define RCU_NOGP_WAKE_FORCE 2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
                                        /* For jiffies_till_first_fqs */
                                        /*  and jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV 256         /* Very large systems need more */
                                        /*  delay between bouts of */
                                        /*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY 2           /* Allow other CPUs time to take */
                                        /*  at least one scheduling clock */
                                        /*  irq before ratting on them. */

#define rcu_wait(cond)                                                  \
do {                                                                    \
        for (;;) {                                                      \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)
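
/*
 * Example usage of rcu_wait() (illustrative only; the boost kthread in
 * tree_plugin.h waits on a condition of roughly this shape):
 *
 *	rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 *
 * The macro keeps re-checking the condition around schedule() until it
 * becomes true, then restores TASK_RUNNING.
 */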

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
        struct rcu_node node[NUM_RCU_NODES];    /* Hierarchy. */
        struct rcu_node *level[RCU_NUM_LVLS + 1];
                                                /* Hierarchy levels (+1 to */
                                                /*  shut bogus gcc warning) */
        u8 flavor_mask;                         /* bit in flavor mask. */
        struct rcu_data __percpu *rda;          /* Pointer to per-CPU rcu_data. */
        call_rcu_func_t call;                   /* call_rcu() flavor. */
        int ncpus;                              /* # CPUs seen so far. */

        /* The following fields are guarded by the root rcu_node's lock. */

        u8 boost ____cacheline_internodealigned_in_smp;
                                                /* Subject to priority boost. */
        unsigned long gpnum;                    /* Current gp number. */
        unsigned long completed;                /* # of last completed gp. */
        struct task_struct *gp_kthread;         /* Task for grace periods. */
        wait_queue_head_t gp_wq;                /* Where GP task waits. */
        short gp_flags;                         /* Commands for GP task. */
        short gp_state;                         /* GP kthread sleep state. */

        /* End of fields guarded by root rcu_node's lock. */

        raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
                                                /* Protect following fields. */
        struct rcu_head *orphan_nxtlist;        /* Orphaned callbacks that */
                                                /*  need a grace period. */
        struct rcu_head **orphan_nxttail;       /* Tail of above. */
        struct rcu_head *orphan_donelist;       /* Orphaned callbacks that */
                                                /*  are ready to invoke. */
        struct rcu_head **orphan_donetail;      /* Tail of above. */
        long qlen_lazy;                         /* Number of lazy callbacks. */
        long qlen;                              /* Total number of callbacks. */
        /* End of fields guarded by orphan_lock. */

        struct mutex barrier_mutex;             /* Guards barrier fields. */
        atomic_t barrier_cpu_count;             /* # CPUs waiting on. */
        struct completion barrier_completion;   /* Wake at barrier end. */
        unsigned long barrier_sequence;         /* ++ at start and end of */
                                                /*  _rcu_barrier(). */
        /* End of fields guarded by barrier_mutex. */

        unsigned long expedited_sequence;       /* Take a ticket. */
        atomic_long_t expedited_normal;         /* # fallbacks to normal. */
        atomic_t expedited_need_qs;             /* # CPUs left to check in. */
        wait_queue_head_t expedited_wq;         /* Wait for check-ins. */
        int ncpus_snap;                         /* # CPUs seen last time. */

        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */
        unsigned long n_force_qs;               /* Number of calls to */
                                                /*  force_quiescent_state(). */
        unsigned long n_force_qs_lh;            /* ~Number of calls leaving */
                                                /*  due to lock unavailable. */
        unsigned long n_force_qs_ngp;           /* Number of calls leaving */
                                                /*  due to no GP active. */
        unsigned long gp_start;                 /* Time at which GP started, */
                                                /*  in jiffies. */
        unsigned long gp_activity;              /* Time of last GP kthread */
                                                /*  activity in jiffies. */
        unsigned long jiffies_stall;            /* Time at which to check */
                                                /*  for CPU stalls. */
        unsigned long jiffies_resched;          /* Time at which to resched */
                                                /*  a reluctant CPU. */
        unsigned long n_force_qs_gpstart;       /* Snapshot of n_force_qs at */
                                                /*  GP start. */
        unsigned long gp_max;                   /* Maximum GP duration in */
                                                /*  jiffies. */
        const char *name;                       /* Name of structure. */
        char abbr;                              /* Abbreviated name. */
        struct list_head flavors;               /* List of RCU flavors. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1    /* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2    /* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE      0      /* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1      /* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2      /* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3      /* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4      /* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   5      /* Grace-period cleanup started. */
#define RCU_GP_CLEANED   6      /* Grace-period cleanup complete. */

#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
        "RCU_GP_IDLE",
        "RCU_GP_WAIT_GPS",
        "RCU_GP_DONE_GPS",
        "RCU_GP_WAIT_FQS",
        "RCU_GP_DOING_FQS",
        "RCU_GP_CLEANUP",
        "RCU_GP_CLEANED",
};
#endif /* #ifndef RCU_TREE_NONCORE */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
        list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
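
/*
 * Example usage (illustrative only): visit each flavor's rcu_state, as
 * tree.c does when iterating over rcu_sched, rcu_bh, and (if configured)
 * rcu_preempt.  "do_something_with" is a hypothetical helper:
 *
 *	struct rcu_state *rsp;
 *
 *	for_each_rcu_flavor(rsp)
 *		do_something_with(rsp);
 */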

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
                                      struct rcu_data *rdp,
                                      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
                                  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
                                  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
        *ql = atomic_long_read(&rdp->nocb_q_count);
        *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
        *ql = 0;
        *qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_PPC
#define smp_mb__after_unlock_lock()     smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_PPC */
#define smp_mb__after_unlock_lock()     do { } while (0)
#endif /* #else #ifdef CONFIG_PPC */

/*
 * Wrappers for the rcu_node::lock acquire.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
        raw_spin_lock(&rnp->lock);
        smp_mb__after_unlock_lock();
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)      \
do {                                                    \
        typecheck(unsigned long, flags);                \
        raw_spin_lock_irqsave(&(rnp)->lock, flags);     \
        smp_mb__after_unlock_lock();                    \
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
        bool locked = raw_spin_trylock(&rnp->lock);

        if (locked)
                smp_mb__after_unlock_lock();
        return locked;
}
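
/*
 * Example usage of the wrappers above (illustrative only): the extra
 * smp_mb__after_unlock_lock() makes the release of one rcu_node's lock
 * followed by the acquisition of another's behave like a full barrier:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ... examine or update rnp->qsmask and friends ...
 *	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 *
 * Note that the unlock side needs no wrapper; the required ordering is
 * supplied at the next acquisition.
 */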