/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
	unsigned long rew_s;
#ifdef CONFIG_RCU_EXP_KTHREAD
	struct kthread_work rew_work;
#else
	struct work_struct rew_work;
#endif /* CONFIG_RCU_EXP_KTHREAD */
};

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  the following. */
	unsigned long gp_seq;		/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed;	/* Track furthest future GP request. */
	unsigned long completedqs;	/* All QSes done for this node. */
	unsigned long qsmask;		/* CPUs or groups that need to switch in */
					/*  order for current grace period to proceed. */
					/*  In leaf rcu_node, each bit corresponds to */
					/*  an rcu_data structure, otherwise, each */
					/*  bit corresponds to a child rcu_node */
					/*  structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
					/* Per-GP initial value for qsmask. */
					/*  Initialized from ->qsmaskinitnext at the */
					/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
					/* Online CPUs for next grace period. */
	unsigned long expmask;		/* CPUs or groups that need to check in */
					/*  to allow the current expedited GP */
					/*  to complete. */
	unsigned long expmaskinit;
					/* Per-GP initial values for expmask. */
					/*  Initialized from ->expmaskinitnext at the */
					/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
					/* Online CPUs for next expedited GP. */
					/*  Any CPU that has ever been online will */
					/*  have its bit set. */
	unsigned long cbovldmask;
					/* CPUs experiencing callback overload. */
	unsigned long ffmask;		/* Fully functional CPUs. */
	unsigned long grpmask;		/* Mask to apply to parent qsmask. */
					/*  Only one bit will be set in this mask. */
	int	grplo;			/* lowest-numbered CPU here. */
	int	grphi;			/* highest-numbered CPU here. */
	u8	grpnum;			/* group number for next level up. */
	u8	level;			/* root is at level 0. */
	bool	wait_blkd_tasks;	/* Necessary to wait for blocked tasks to */
					/*  exit RCU read-side critical sections */
					/*  before propagating offline up the */
					/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
					/* Tasks blocked in RCU read-side critical */
					/*  section.  Tasks are placed at the head */
					/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
					/* Pointer to the first task blocking the */
					/*  current grace period, or NULL if there */
					/*  is no such task. */
	struct list_head *exp_tasks;
					/* Pointer to the first task blocking the */
					/*  current expedited grace period, or NULL */
					/*  if there is no such task.  If there */
					/*  is no current expedited grace period, */
					/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
					/* Pointer to first task that needs to be */
					/*  priority boosted, or NULL if no priority */
					/*  boosting is needed for this rcu_node */
					/*  structure.  If there are no tasks */
					/*  queued on this rcu_node structure that */
					/*  are blocking the current grace period, */
					/*  there can be no such task. */
	struct rt_mutex boost_mtx;
					/* Used only for the priority-boosting */
					/*  side effect, not as a lock. */
	unsigned long boost_time;
					/* When to start boosting (jiffies). */
	struct mutex boost_kthread_mutex;
					/* Exclusion for thread spawning and affinity */
					/*  manipulation. */
	struct task_struct *boost_kthread_task;
					/* kthread that takes care of priority */
					/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
					/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;		/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
					/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;		/* Need to flush workitem? */
	raw_spinlock_t exp_poll_lock;
					/* Lock and data for polled expedited grace periods. */
	unsigned long exp_seq_poll_rq;
	struct work_struct exp_poll_wq;
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
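
/*
 * Worked example (illustrative only, not used by the kernel itself):
 * for a leaf rcu_node covering CPUs 16..31 (->grplo == 16, ->grphi ==
 * 31), CPU 18 maps to bit 2 of the node-local masks:
 *
 *	leaf_node_cpu_bit(rnp, 18) == BIT(18 - 16) == 0x4
 *
 * So "rnp->qsmask & leaf_node_cpu_bit(rnp, 18)" tests whether CPU 18
 * still owes a quiescent state to the current grace period.
 */
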
/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
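
/*
 * Sketch of the aggregate-OR idiom (assumes a local rcu_data pointer
 * named rdp; see struct rcu_data below).  The byte-sized ->b flags are
 * written individually, while a single read of ->s answers "does either
 * grace-period type still need a quiescent state from this CPU?":
 *
 *	rdp->cpu_no_qs.b.norm = true;	// normal GP still needs a QS
 *	if (rdp->cpu_no_qs.s)		// nonzero if .b.norm or .b.exp set
 *		...			// QS not yet reported for some GP
 */
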
/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling: */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node	*mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	atomic_t nocb_lock_contended;	/* Contention experienced. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
					    /*  spawning */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
	struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;
	unsigned long rcuc_activity;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
	unsigned long last_sched_clock;	/* Jiffies of last rcu_sched_clock_irq(). */

	long lazy_len;			/* Length of buffered lazy callbacks. */
	int cpu;
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE_LAZY	2
#define RCU_NOCB_WAKE		3
#define RCU_NOCB_WAKE_FORCE	4

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs */
					/*  and jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */
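
/*
 * Worked example for RCU_JIFFIES_TILL_FORCE_QS: each comparison
 * contributes 0 or 1, so HZ=100 yields 1 + 0 + 0 = 1 jiffy (10ms),
 * HZ=300 yields 1 + 1 + 0 = 2 jiffies (~6.7ms), and HZ=1000 yields
 * 1 + 1 + 1 = 3 jiffies (3ms).  Higher-HZ kernels thus wait more
 * ticks, but a roughly constant wall-clock time, before forcing
 * quiescent states.
 */
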
#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
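
/*
 * Usage sketch for rcu_wait() (illustrative only; "my_flag" is a
 * made-up variable, not part of this header).  The caller sleeps
 * interruptibly until the condition is observed true, re-checking
 * after every wakeup, so spurious wakeups are harmless:
 *
 *	rcu_wait(READ_ONCE(my_flag));	// returns once my_flag is nonzero
 *
 * The waking side sets the condition and then wakes the sleeping
 * kthread, e.g. via wake_up_process().
 */
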
/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
						/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
	unsigned long gp_seq_polled;		/* GP seq for polled API. */
	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
	int nocb_is_setup;			/* nocb is set up from boot */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS	 1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS	 2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF	 3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT	 4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS	 5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 8	/* Grace-period cleanup complete. */
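
/*
 * Worked example of the "heap" layout described above struct rcu_state
 * (illustrative numbers, assuming a fanout of 16 at both levels): 256
 * CPUs need 16 leaf rcu_node structures, which in turn need a single
 * root, giving a two-level tree of 17 nodes laid out as:
 *
 *	->node[0]	root	(->level[0] == &->node[0])
 *	->node[1..16]	leaves	(->level[1] == &->node[1])
 *
 * Each leaf covers 16 CPUs ([grplo, grphi] = [0, 15], [16, 31], ...),
 * and each leaf's ->grpmask selects its bit in the root's ->qsmask.
 */
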
/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added to the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows the userspace
 * tracing tools to translate the string address back to the
 * matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags,
				bool lazy);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)				\
do {									\
	local_irq_save(flags);						\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))			\
		raw_spin_lock(&(rdp)->nocb_lock);			\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
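
/*
 * Usage sketch for rcu_nocb_lock_irqsave() (illustrative only): pair it
 * with rcu_nocb_unlock_irqrestore(), declared above, around ->cblist
 * manipulation:
 *
 *	unsigned long flags;
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	// ... manipulate rdp->cblist ...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 *
 * Because interrupts are disabled before the offloaded-state check, a
 * concurrent de-offload cannot slip in between that check and the
 * acquisition of ->nocb_lock.
 */
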
static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);

/* Forward declarations for tree_exp.h. */
static void sync_rcu_do_polled_gp(struct work_struct *wp);