1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 4 * 5 * Copyright IBM Corporation, 2008 6 * 7 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 8 * Manfred Spraul <manfred@colorfullife.com> 9 * Paul E. McKenney <paulmck@linux.ibm.com> 10 * 11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> 12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 13 * 14 * For detailed explanation of Read-Copy Update mechanism see - 15 * Documentation/RCU 16 */ 17 18 #define pr_fmt(fmt) "rcu: " fmt 19 20 #include <linux/types.h> 21 #include <linux/kernel.h> 22 #include <linux/init.h> 23 #include <linux/spinlock.h> 24 #include <linux/smp.h> 25 #include <linux/rcupdate_wait.h> 26 #include <linux/interrupt.h> 27 #include <linux/sched.h> 28 #include <linux/sched/debug.h> 29 #include <linux/nmi.h> 30 #include <linux/atomic.h> 31 #include <linux/bitops.h> 32 #include <linux/export.h> 33 #include <linux/completion.h> 34 #include <linux/moduleparam.h> 35 #include <linux/percpu.h> 36 #include <linux/notifier.h> 37 #include <linux/cpu.h> 38 #include <linux/mutex.h> 39 #include <linux/time.h> 40 #include <linux/kernel_stat.h> 41 #include <linux/wait.h> 42 #include <linux/kthread.h> 43 #include <uapi/linux/sched/types.h> 44 #include <linux/prefetch.h> 45 #include <linux/delay.h> 46 #include <linux/random.h> 47 #include <linux/trace_events.h> 48 #include <linux/suspend.h> 49 #include <linux/ftrace.h> 50 #include <linux/tick.h> 51 #include <linux/sysrq.h> 52 #include <linux/kprobes.h> 53 #include <linux/gfp.h> 54 #include <linux/oom.h> 55 #include <linux/smpboot.h> 56 #include <linux/jiffies.h> 57 #include <linux/slab.h> 58 #include <linux/sched/isolation.h> 59 #include <linux/sched/clock.h> 60 #include <linux/vmalloc.h> 61 #include <linux/mm.h> 62 #include <linux/kasan.h> 63 #include "../time/tick-internal.h" 64 65 #include "tree.h" 66 #include "rcu.h" 67 68 #ifdef MODULE_PARAM_PREFIX 69 #undef MODULE_PARAM_PREFIX 70 #endif 71 #define MODULE_PARAM_PREFIX "rcutree." 72 73 #ifndef data_race 74 #define data_race(expr) \ 75 ({ \ 76 expr; \ 77 }) 78 #endif 79 #ifndef ASSERT_EXCLUSIVE_WRITER 80 #define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0) 81 #endif 82 #ifndef ASSERT_EXCLUSIVE_ACCESS 83 #define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0) 84 #endif 85 86 /* Data structures. */ 87 88 /* 89 * Steal a bit from the bottom of ->dynticks for idle entry/exit 90 * control. Initially this is for TLB flushing. 91 */ 92 #define RCU_DYNTICK_CTRL_MASK 0x1 93 #define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1) 94 95 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { 96 .dynticks_nesting = 1, 97 .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, 98 .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), 99 }; 100 static struct rcu_state rcu_state = { 101 .level = { &rcu_state.node[0] }, 102 .gp_state = RCU_GP_IDLE, 103 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, 104 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), 105 .name = RCU_NAME, 106 .abbr = RCU_ABBR, 107 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), 108 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), 109 .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), 110 }; 111 112 /* Dump rcu_node combining tree at boot to verify correct setup. */ 113 static bool dump_tree; 114 module_param(dump_tree, bool, 0444); 115 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. 
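 * (Added usage note: with the "rcutree." MODULE_PARAM_PREFIX defined above,
 *  booting with rcutree.use_softirq=0 selects the per-CPU rcuc kthreads
 *  instead of RCU_SOFTIRQ.)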
*/ 116 static bool use_softirq = true; 117 module_param(use_softirq, bool, 0444); 118 /* Control rcu_node-tree auto-balancing at boot time. */ 119 static bool rcu_fanout_exact; 120 module_param(rcu_fanout_exact, bool, 0444); 121 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */ 122 static int rcu_fanout_leaf = RCU_FANOUT_LEAF; 123 module_param(rcu_fanout_leaf, int, 0444); 124 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; 125 /* Number of rcu_nodes at specified level. */ 126 int num_rcu_lvl[] = NUM_RCU_LVL_INIT; 127 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ 128 129 /* 130 * The rcu_scheduler_active variable is initialized to the value 131 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the 132 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE, 133 * RCU can assume that there is but one task, allowing RCU to (for example) 134 * optimize synchronize_rcu() to a simple barrier(). When this variable 135 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required 136 * to detect real grace periods. This variable is also used to suppress 137 * boot-time false positives from lockdep-RCU error checking. Finally, it 138 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU 139 * is fully initialized, including all of its kthreads having been spawned. 140 */ 141 int rcu_scheduler_active __read_mostly; 142 EXPORT_SYMBOL_GPL(rcu_scheduler_active); 143 144 /* 145 * The rcu_scheduler_fully_active variable transitions from zero to one 146 * during the early_initcall() processing, which is after the scheduler 147 * is capable of creating new tasks. So RCU processing (for example, 148 * creating tasks for RCU priority boosting) must be delayed until after 149 * rcu_scheduler_fully_active transitions from zero to one. We also 150 * currently delay invocation of any RCU callbacks until after this point. 151 * 152 * It might later prove better for people registering RCU callbacks during 153 * early boot to take responsibility for these callbacks, but one step at 154 * a time. 155 */ 156 static int rcu_scheduler_fully_active __read_mostly; 157 158 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 159 unsigned long gps, unsigned long flags); 160 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); 161 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); 162 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 163 static void invoke_rcu_core(void); 164 static void rcu_report_exp_rdp(struct rcu_data *rdp); 165 static void sync_sched_exp_online_cleanup(int cpu); 166 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp); 167 168 /* rcuc/rcub kthread realtime priority */ 169 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0; 170 module_param(kthread_prio, int, 0444); 171 172 /* Delay in jiffies for grace-period initialization delays, debug only. */ 173 174 static int gp_preinit_delay; 175 module_param(gp_preinit_delay, int, 0444); 176 static int gp_init_delay; 177 module_param(gp_init_delay, int, 0444); 178 static int gp_cleanup_delay; 179 module_param(gp_cleanup_delay, int, 0444); 180 181 /* 182 * This rcu parameter is runtime-read-only. It reflects 183 * a minimum allowed number of objects which can be cached 184 * per-CPU. Object size is equal to one page. This value 185 * can be changed at boot time. 
186 */ 187 static int rcu_min_cached_objs = 2; 188 module_param(rcu_min_cached_objs, int, 0444); 189 190 /* Retrieve RCU kthreads priority for rcutorture */ 191 int rcu_get_gp_kthreads_prio(void) 192 { 193 return kthread_prio; 194 } 195 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); 196 197 /* 198 * Number of grace periods between delays, normalized by the duration of 199 * the delay. The longer the delay, the more the grace periods between 200 * each delay. The reason for this normalization is that it means that, 201 * for non-zero delays, the overall slowdown of grace periods is constant 202 * regardless of the duration of the delay. This arrangement balances 203 * the need for long delays to increase some race probabilities with the 204 * need for fast grace periods to increase other race probabilities. 205 */ 206 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ 207 208 /* 209 * Compute the mask of online CPUs for the specified rcu_node structure. 210 * This will not be stable unless the rcu_node structure's ->lock is 211 * held, but the bit corresponding to the current CPU will be stable 212 * in most contexts. 213 */ 214 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 215 { 216 return READ_ONCE(rnp->qsmaskinitnext); 217 } 218 219 /* 220 * Return true if an RCU grace period is in progress. The READ_ONCE()s 221 * permit this function to be invoked without holding the root rcu_node 222 * structure's ->lock, but of course results can be subject to change. 223 */ 224 static int rcu_gp_in_progress(void) 225 { 226 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); 227 } 228 229 /* 230 * Return the number of callbacks queued on the specified CPU. 231 * Handles both the nocbs and normal cases. 232 */ 233 static long rcu_get_n_cbs_cpu(int cpu) 234 { 235 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 236 237 if (rcu_segcblist_is_enabled(&rdp->cblist)) 238 return rcu_segcblist_n_cbs(&rdp->cblist); 239 return 0; 240 } 241 242 void rcu_softirq_qs(void) 243 { 244 rcu_qs(); 245 rcu_preempt_deferred_qs(current); 246 } 247 248 /* 249 * Record entry into an extended quiescent state. This is only to be 250 * called when not already in an extended quiescent state, that is, 251 * RCU is watching prior to the call to this function and is no longer 252 * watching upon return. 253 */ 254 static noinstr void rcu_dynticks_eqs_enter(void) 255 { 256 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 257 int seq; 258 259 /* 260 * CPUs seeing atomic_add_return() must see prior RCU read-side 261 * critical sections, and we also must force ordering with the 262 * next idle sojourn. 263 */ 264 rcu_dynticks_task_trace_enter(); // Before ->dynticks update! 265 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 266 // RCU is no longer watching. Better be in extended quiescent state! 267 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 268 (seq & RCU_DYNTICK_CTRL_CTR)); 269 /* Better not have special action (TLB flush) pending! */ 270 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 271 (seq & RCU_DYNTICK_CTRL_MASK)); 272 } 273 274 /* 275 * Record exit from an extended quiescent state. This is only to be 276 * called from an extended quiescent state, that is, RCU is not watching 277 * prior to the call to this function and is watching upon return. 
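 *
 * Worked example (added illustration, using the RCU_DYNTICK_CTRL_* values
 * defined above): with RCU_DYNTICK_CTRL_MASK == 0x1 and
 * RCU_DYNTICK_CTRL_CTR == 0x2, a ->dynticks value of 0x4 has the 0x2 bit
 * clear, so RCU is not watching.  The arch_atomic_add_return() below adds
 * 0x2, yielding 0x6, whose 0x2 bit is set, so RCU is watching once more.
 * If the 0x1 bit was set by rcu_eqs_special_set() while this CPU was idle,
 * it is cleared below with full ordering.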
278 */ 279 static noinstr void rcu_dynticks_eqs_exit(void) 280 { 281 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 282 int seq; 283 284 /* 285 * CPUs seeing atomic_add_return() must see prior idle sojourns, 286 * and we also must force ordering with the next RCU read-side 287 * critical section. 288 */ 289 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 290 // RCU is now watching. Better not be in an extended quiescent state! 291 rcu_dynticks_task_trace_exit(); // After ->dynticks update! 292 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 293 !(seq & RCU_DYNTICK_CTRL_CTR)); 294 if (seq & RCU_DYNTICK_CTRL_MASK) { 295 arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); 296 smp_mb__after_atomic(); /* _exit after clearing mask. */ 297 } 298 } 299 300 /* 301 * Reset the current CPU's ->dynticks counter to indicate that the 302 * newly onlined CPU is no longer in an extended quiescent state. 303 * This will either leave the counter unchanged, or increment it 304 * to the next non-quiescent value. 305 * 306 * The non-atomic test/increment sequence works because the upper bits 307 * of the ->dynticks counter are manipulated only by the corresponding CPU, 308 * or when the corresponding CPU is offline. 309 */ 310 static void rcu_dynticks_eqs_online(void) 311 { 312 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 313 314 if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR) 315 return; 316 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 317 } 318 319 /* 320 * Is the current CPU in an extended quiescent state? 321 * 322 * No ordering, as we are sampling CPU-local information. 323 */ 324 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) 325 { 326 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 327 328 return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); 329 } 330 331 /* 332 * Snapshot the ->dynticks counter with full ordering so as to allow 333 * stable comparison of this counter with past and future snapshots. 334 */ 335 static int rcu_dynticks_snap(struct rcu_data *rdp) 336 { 337 int snap = atomic_add_return(0, &rdp->dynticks); 338 339 return snap & ~RCU_DYNTICK_CTRL_MASK; 340 } 341 342 /* 343 * Return true if the snapshot returned from rcu_dynticks_snap() 344 * indicates that RCU is in an extended quiescent state. 345 */ 346 static bool rcu_dynticks_in_eqs(int snap) 347 { 348 return !(snap & RCU_DYNTICK_CTRL_CTR); 349 } 350 351 /* 352 * Return true if the CPU corresponding to the specified rcu_data 353 * structure has spent some time in an extended quiescent state since 354 * rcu_dynticks_snap() returned the specified snapshot. 355 */ 356 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) 357 { 358 return snap != rcu_dynticks_snap(rdp); 359 } 360 361 /* 362 * Return true if the referenced integer is zero while the specified 363 * CPU remains within a single extended quiescent state. 364 */ 365 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) 366 { 367 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 368 int snap; 369 370 // If not quiescent, force back to earlier extended quiescent state. 371 snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK | 372 RCU_DYNTICK_CTRL_CTR); 373 374 smp_rmb(); // Order ->dynticks and *vp reads. 375 if (READ_ONCE(*vp)) 376 return false; // Non-zero, so report failure; 377 smp_rmb(); // Order *vp read and ->dynticks re-read. 378 379 // If still in the same extended quiescent state, we are good! 
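	// (Added note: "snap" was sampled with the RCU_DYNTICK_CTRL_CTR bit
	// cleared, so the comparison below succeeds only if this CPU is in
	// an extended quiescent state right now and ->dynticks has not
	// advanced since the snapshot, that is, only if *vp was read while
	// the CPU remained within a single extended quiescent state.)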
380 return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK); 381 } 382 383 /* 384 * Set the special (bottom) bit of the specified CPU so that it 385 * will take special action (such as flushing its TLB) on the 386 * next exit from an extended quiescent state. Returns true if 387 * the bit was successfully set, or false if the CPU was not in 388 * an extended quiescent state. 389 */ 390 bool rcu_eqs_special_set(int cpu) 391 { 392 int old; 393 int new; 394 int new_old; 395 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 396 397 new_old = atomic_read(&rdp->dynticks); 398 do { 399 old = new_old; 400 if (old & RCU_DYNTICK_CTRL_CTR) 401 return false; 402 new = old | RCU_DYNTICK_CTRL_MASK; 403 new_old = atomic_cmpxchg(&rdp->dynticks, old, new); 404 } while (new_old != old); 405 return true; 406 } 407 408 /* 409 * Let the RCU core know that this CPU has gone through the scheduler, 410 * which is a quiescent state. This is called when the need for a 411 * quiescent state is urgent, so we burn an atomic operation and full 412 * memory barriers to let the RCU core know about it, regardless of what 413 * this CPU might (or might not) do in the near future. 414 * 415 * We inform the RCU core by emulating a zero-duration dyntick-idle period. 416 * 417 * The caller must have disabled interrupts and must not be idle. 418 */ 419 void rcu_momentary_dyntick_idle(void) 420 { 421 int special; 422 423 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); 424 special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, 425 &this_cpu_ptr(&rcu_data)->dynticks); 426 /* It is illegal to call this from idle state. */ 427 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); 428 rcu_preempt_deferred_qs(current); 429 } 430 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle); 431 432 /** 433 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle 434 * 435 * If the current CPU is idle and running at a first-level (not nested) 436 * interrupt, or directly, from idle, return true. 437 * 438 * The caller must have at least disabled IRQs. 439 */ 440 static int rcu_is_cpu_rrupt_from_idle(void) 441 { 442 long nesting; 443 444 /* 445 * Usually called from the tick; but also used from smp_function_call() 446 * for expedited grace periods. This latter can result in running from 447 * the idle task, instead of an actual IPI. 448 */ 449 lockdep_assert_irqs_disabled(); 450 451 /* Check for counter underflows */ 452 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0, 453 "RCU dynticks_nesting counter underflow!"); 454 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0, 455 "RCU dynticks_nmi_nesting counter underflow/zero!"); 456 457 /* Are we at first interrupt nesting level? */ 458 nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting); 459 if (nesting > 1) 460 return false; 461 462 /* 463 * If we're not in an interrupt, we must be in the idle task! 464 */ 465 WARN_ON_ONCE(!nesting && !is_idle_task(current)); 466 467 /* Does CPU appear to be idle from an RCU standpoint? */ 468 return __this_cpu_read(rcu_data.dynticks_nesting) == 0; 469 } 470 471 #define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch ... */ 472 #define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */ 473 static long blimit = DEFAULT_RCU_BLIMIT; 474 #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */ 475 static long qhimark = DEFAULT_RCU_QHIMARK; 476 #define DEFAULT_RCU_QLOMARK 100 /* Once only this many pending, use blimit. 
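 * (Added note restating the defaults above: callback invocation is normally
 *  capped at blimit == 10 per rcu_do_batch() pass, a CPU with more than
 *  qhimark == 10000 callbacks queued temporarily ignores that cap, and the
 *  cap is restored once the queue drains back to roughly qlowmark == 100.)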
*/ 477 static long qlowmark = DEFAULT_RCU_QLOMARK; 478 #define DEFAULT_RCU_QOVLD_MULT 2 479 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK) 480 static long qovld = DEFAULT_RCU_QOVLD; /* If this many pending, hammer QS. */ 481 static long qovld_calc = -1; /* No pre-initialization lock acquisitions! */ 482 483 module_param(blimit, long, 0444); 484 module_param(qhimark, long, 0444); 485 module_param(qlowmark, long, 0444); 486 module_param(qovld, long, 0444); 487 488 static ulong jiffies_till_first_fqs = ULONG_MAX; 489 static ulong jiffies_till_next_fqs = ULONG_MAX; 490 static bool rcu_kick_kthreads; 491 static int rcu_divisor = 7; 492 module_param(rcu_divisor, int, 0644); 493 494 /* Force an exit from rcu_do_batch() after 3 milliseconds. */ 495 static long rcu_resched_ns = 3 * NSEC_PER_MSEC; 496 module_param(rcu_resched_ns, long, 0644); 497 498 /* 499 * How long the grace period must be before we start recruiting 500 * quiescent-state help from rcu_note_context_switch(). 501 */ 502 static ulong jiffies_till_sched_qs = ULONG_MAX; 503 module_param(jiffies_till_sched_qs, ulong, 0444); 504 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */ 505 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */ 506 507 /* 508 * Make sure that we give the grace-period kthread time to detect any 509 * idle CPUs before taking active measures to force quiescent states. 510 * However, don't go below 100 milliseconds, adjusted upwards for really 511 * large systems. 512 */ 513 static void adjust_jiffies_till_sched_qs(void) 514 { 515 unsigned long j; 516 517 /* If jiffies_till_sched_qs was specified, respect the request. */ 518 if (jiffies_till_sched_qs != ULONG_MAX) { 519 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); 520 return; 521 } 522 /* Otherwise, set to third fqs scan, but bound below on large system. */ 523 j = READ_ONCE(jiffies_till_first_fqs) + 524 2 * READ_ONCE(jiffies_till_next_fqs); 525 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) 526 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 527 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); 528 WRITE_ONCE(jiffies_to_sched_qs, j); 529 } 530 531 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp) 532 { 533 ulong j; 534 int ret = kstrtoul(val, 0, &j); 535 536 if (!ret) { 537 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); 538 adjust_jiffies_till_sched_qs(); 539 } 540 return ret; 541 } 542 543 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp) 544 { 545 ulong j; 546 int ret = kstrtoul(val, 0, &j); 547 548 if (!ret) { 549 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); 550 adjust_jiffies_till_sched_qs(); 551 } 552 return ret; 553 } 554 555 static struct kernel_param_ops first_fqs_jiffies_ops = { 556 .set = param_set_first_fqs_jiffies, 557 .get = param_get_ulong, 558 }; 559 560 static struct kernel_param_ops next_fqs_jiffies_ops = { 561 .set = param_set_next_fqs_jiffies, 562 .get = param_get_ulong, 563 }; 564 565 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644); 566 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644); 567 module_param(rcu_kick_kthreads, bool, 0644); 568 569 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); 570 static int rcu_pending(int user); 571 572 /* 573 * Return the number of RCU GPs completed thus far for debug & stats. 
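 *
 * (Added note: the returned value is an rcu_seq counter whose low
 *  RCU_SEQ_CTR_SHIFT bits encode grace-period state, as the rcu_state.gp_seq
 *  initializer above shows, so snapshots are best compared with the
 *  rcu_seq_*() helpers from rcu.h rather than by raw subtraction.)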
574 */ 575 unsigned long rcu_get_gp_seq(void) 576 { 577 return READ_ONCE(rcu_state.gp_seq); 578 } 579 EXPORT_SYMBOL_GPL(rcu_get_gp_seq); 580 581 /* 582 * Return the number of RCU expedited batches completed thus far for 583 * debug & stats. Odd numbers mean that a batch is in progress, even 584 * numbers mean idle. The value returned will thus be roughly double 585 * the cumulative batches since boot. 586 */ 587 unsigned long rcu_exp_batches_completed(void) 588 { 589 return rcu_state.expedited_sequence; 590 } 591 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); 592 593 /* 594 * Return the root node of the rcu_state structure. 595 */ 596 static struct rcu_node *rcu_get_root(void) 597 { 598 return &rcu_state.node[0]; 599 } 600 601 /* 602 * Send along grace-period-related data for rcutorture diagnostics. 603 */ 604 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 605 unsigned long *gp_seq) 606 { 607 switch (test_type) { 608 case RCU_FLAVOR: 609 *flags = READ_ONCE(rcu_state.gp_flags); 610 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); 611 break; 612 default: 613 break; 614 } 615 } 616 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 617 618 /* 619 * Enter an RCU extended quiescent state, which can be either the 620 * idle loop or adaptive-tickless usermode execution. 621 * 622 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for 623 * the possibility of usermode upcalls having messed up our count 624 * of interrupt nesting level during the prior busy period. 625 */ 626 static noinstr void rcu_eqs_enter(bool user) 627 { 628 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 629 630 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); 631 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); 632 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 633 rdp->dynticks_nesting == 0); 634 if (rdp->dynticks_nesting != 1) { 635 // RCU will still be watching, so just do accounting and leave. 636 rdp->dynticks_nesting--; 637 return; 638 } 639 640 lockdep_assert_irqs_disabled(); 641 instrumentation_begin(); 642 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); 643 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 644 rdp = this_cpu_ptr(&rcu_data); 645 do_nocb_deferred_wakeup(rdp); 646 rcu_prepare_for_idle(); 647 rcu_preempt_deferred_qs(current); 648 649 // instrumentation for the noinstr rcu_dynticks_eqs_enter() 650 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 651 652 instrumentation_end(); 653 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ 654 // RCU is watching here ... 655 rcu_dynticks_eqs_enter(); 656 // ... but is no longer watching here. 657 rcu_dynticks_task_enter(); 658 } 659 660 /** 661 * rcu_idle_enter - inform RCU that current CPU is entering idle 662 * 663 * Enter idle mode, in other words, -leave- the mode in which RCU 664 * read-side critical sections can occur. (Though RCU read-side 665 * critical sections can occur in irq handlers in idle, a possibility 666 * handled by irq_enter() and irq_exit().) 667 * 668 * If you add or remove a call to rcu_idle_enter(), be sure to test with 669 * CONFIG_RCU_EQS_DEBUG=y. 670 */ 671 void rcu_idle_enter(void) 672 { 673 lockdep_assert_irqs_disabled(); 674 rcu_eqs_enter(false); 675 } 676 677 #ifdef CONFIG_NO_HZ_FULL 678 /** 679 * rcu_user_enter - inform RCU that we are resuming userspace. 680 * 681 * Enter RCU idle mode right before resuming userspace. No use of RCU 682 * is permitted between this call and rcu_user_exit(). 
This way the 683 * CPU doesn't need to maintain the tick for RCU maintenance purposes 684 * when the CPU runs in userspace. 685 * 686 * If you add or remove a call to rcu_user_enter(), be sure to test with 687 * CONFIG_RCU_EQS_DEBUG=y. 688 */ 689 noinstr void rcu_user_enter(void) 690 { 691 lockdep_assert_irqs_disabled(); 692 rcu_eqs_enter(true); 693 } 694 #endif /* CONFIG_NO_HZ_FULL */ 695 696 /** 697 * rcu_nmi_exit - inform RCU of exit from NMI context 698 * 699 * If we are returning from the outermost NMI handler that interrupted an 700 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting 701 * to let the RCU grace-period handling know that the CPU is back to 702 * being RCU-idle. 703 * 704 * If you add or remove a call to rcu_nmi_exit(), be sure to test 705 * with CONFIG_RCU_EQS_DEBUG=y. 706 */ 707 noinstr void rcu_nmi_exit(void) 708 { 709 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 710 711 instrumentation_begin(); 712 /* 713 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. 714 * (We are exiting an NMI handler, so RCU better be paying attention 715 * to us!) 716 */ 717 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); 718 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()); 719 720 /* 721 * If the nesting level is not 1, the CPU wasn't RCU-idle, so 722 * leave it in non-RCU-idle state. 723 */ 724 if (rdp->dynticks_nmi_nesting != 1) { 725 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, 726 atomic_read(&rdp->dynticks)); 727 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ 728 rdp->dynticks_nmi_nesting - 2); 729 instrumentation_end(); 730 return; 731 } 732 733 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ 734 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); 735 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ 736 737 if (!in_nmi()) 738 rcu_prepare_for_idle(); 739 740 // instrumentation for the noinstr rcu_dynticks_eqs_enter() 741 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 742 instrumentation_end(); 743 744 // RCU is watching here ... 745 rcu_dynticks_eqs_enter(); 746 // ... but is no longer watching here. 747 748 if (!in_nmi()) 749 rcu_dynticks_task_enter(); 750 } 751 752 /** 753 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle 754 * 755 * Exit from an interrupt handler, which might possibly result in entering 756 * idle mode, in other words, leaving the mode in which read-side critical 757 * sections can occur. The caller must have disabled interrupts. 758 * 759 * This code assumes that the idle loop never does anything that might 760 * result in unbalanced calls to irq_enter() and irq_exit(). If your 761 * architecture's idle loop violates this assumption, RCU will give you what 762 * you deserve, good and hard. But very infrequently and irreproducibly. 763 * 764 * Use things like work queues to work around this limitation. 765 * 766 * You have been warned. 767 * 768 * If you add or remove a call to rcu_irq_exit(), be sure to test with 769 * CONFIG_RCU_EQS_DEBUG=y. 770 */ 771 void noinstr rcu_irq_exit(void) 772 { 773 lockdep_assert_irqs_disabled(); 774 rcu_nmi_exit(); 775 } 776 777 /** 778 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq 779 * towards in kernel preemption 780 * 781 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe 782 * from RCU point of view. Invoked from return from interrupt before kernel 783 * preemption. 
784 */ 785 void rcu_irq_exit_preempt(void) 786 { 787 lockdep_assert_irqs_disabled(); 788 rcu_nmi_exit(); 789 790 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, 791 "RCU dynticks_nesting counter underflow/zero!"); 792 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 793 DYNTICK_IRQ_NONIDLE, 794 "Bad RCU dynticks_nmi_nesting counter\n"); 795 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), 796 "RCU in extended quiescent state!"); 797 } 798 799 #ifdef CONFIG_PROVE_RCU 800 /** 801 * rcu_irq_exit_check_preempt - Validate that scheduling is possible 802 */ 803 void rcu_irq_exit_check_preempt(void) 804 { 805 lockdep_assert_irqs_disabled(); 806 807 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, 808 "RCU dynticks_nesting counter underflow/zero!"); 809 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 810 DYNTICK_IRQ_NONIDLE, 811 "Bad RCU dynticks_nmi_nesting counter\n"); 812 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), 813 "RCU in extended quiescent state!"); 814 } 815 #endif /* #ifdef CONFIG_PROVE_RCU */ 816 817 /* 818 * Wrapper for rcu_irq_exit() where interrupts are enabled. 819 * 820 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test 821 * with CONFIG_RCU_EQS_DEBUG=y. 822 */ 823 void rcu_irq_exit_irqson(void) 824 { 825 unsigned long flags; 826 827 local_irq_save(flags); 828 rcu_irq_exit(); 829 local_irq_restore(flags); 830 } 831 832 /* 833 * Exit an RCU extended quiescent state, which can be either the 834 * idle loop or adaptive-tickless usermode execution. 835 * 836 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to 837 * allow for the possibility of usermode upcalls messing up our count of 838 * interrupt nesting level during the busy period that is just now starting. 839 */ 840 static void noinstr rcu_eqs_exit(bool user) 841 { 842 struct rcu_data *rdp; 843 long oldval; 844 845 lockdep_assert_irqs_disabled(); 846 rdp = this_cpu_ptr(&rcu_data); 847 oldval = rdp->dynticks_nesting; 848 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); 849 if (oldval) { 850 // RCU was already watching, so just do accounting and leave. 851 rdp->dynticks_nesting++; 852 return; 853 } 854 rcu_dynticks_task_exit(); 855 // RCU is not watching here ... 856 rcu_dynticks_eqs_exit(); 857 // ... but is watching here. 858 instrumentation_begin(); 859 860 // instrumentation for the noinstr rcu_dynticks_eqs_exit() 861 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 862 863 rcu_cleanup_after_idle(); 864 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); 865 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 866 WRITE_ONCE(rdp->dynticks_nesting, 1); 867 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); 868 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); 869 instrumentation_end(); 870 } 871 872 /** 873 * rcu_idle_exit - inform RCU that current CPU is leaving idle 874 * 875 * Exit idle mode, in other words, -enter- the mode in which RCU 876 * read-side critical sections can occur. 877 * 878 * If you add or remove a call to rcu_idle_exit(), be sure to test with 879 * CONFIG_RCU_EQS_DEBUG=y. 880 */ 881 void rcu_idle_exit(void) 882 { 883 unsigned long flags; 884 885 local_irq_save(flags); 886 rcu_eqs_exit(false); 887 local_irq_restore(flags); 888 } 889 890 #ifdef CONFIG_NO_HZ_FULL 891 /** 892 * rcu_user_exit - inform RCU that we are exiting userspace. 
893 * 894 * Exit RCU idle mode while entering the kernel because it can 895 * run a RCU read side critical section anytime. 896 * 897 * If you add or remove a call to rcu_user_exit(), be sure to test with 898 * CONFIG_RCU_EQS_DEBUG=y. 899 */ 900 void noinstr rcu_user_exit(void) 901 { 902 rcu_eqs_exit(1); 903 } 904 905 /** 906 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it. 907 * 908 * The scheduler tick is not normally enabled when CPUs enter the kernel 909 * from nohz_full userspace execution. After all, nohz_full userspace 910 * execution is an RCU quiescent state and the time executing in the kernel 911 * is quite short. Except of course when it isn't. And it is not hard to 912 * cause a large system to spend tens of seconds or even minutes looping 913 * in the kernel, which can cause a number of problems, include RCU CPU 914 * stall warnings. 915 * 916 * Therefore, if a nohz_full CPU fails to report a quiescent state 917 * in a timely manner, the RCU grace-period kthread sets that CPU's 918 * ->rcu_urgent_qs flag with the expectation that the next interrupt or 919 * exception will invoke this function, which will turn on the scheduler 920 * tick, which will enable RCU to detect that CPU's quiescent states, 921 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels. 922 * The tick will be disabled once a quiescent state is reported for 923 * this CPU. 924 * 925 * Of course, in carefully tuned systems, there might never be an 926 * interrupt or exception. In that case, the RCU grace-period kthread 927 * will eventually cause one to happen. However, in less carefully 928 * controlled environments, this function allows RCU to get what it 929 * needs without creating otherwise useless interruptions. 930 */ 931 void __rcu_irq_enter_check_tick(void) 932 { 933 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 934 935 // Enabling the tick is unsafe in NMI handlers. 936 if (WARN_ON_ONCE(in_nmi())) 937 return; 938 939 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), 940 "Illegal rcu_irq_enter_check_tick() from extended quiescent state"); 941 942 if (!tick_nohz_full_cpu(rdp->cpu) || 943 !READ_ONCE(rdp->rcu_urgent_qs) || 944 READ_ONCE(rdp->rcu_forced_tick)) { 945 // RCU doesn't need nohz_full help from this CPU, or it is 946 // already getting that help. 947 return; 948 } 949 950 // We get here only when not in an extended quiescent state and 951 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is 952 // already watching and (2) The fact that we are in an interrupt 953 // handler and that the rcu_node lock is an irq-disabled lock 954 // prevents self-deadlock. So we can safely recheck under the lock. 955 // Note that the nohz_full state currently cannot change. 956 raw_spin_lock_rcu_node(rdp->mynode); 957 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) { 958 // A nohz_full CPU is in the kernel and RCU needs a 959 // quiescent state. Turn on the tick! 960 WRITE_ONCE(rdp->rcu_forced_tick, true); 961 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 962 } 963 raw_spin_unlock_rcu_node(rdp->mynode); 964 } 965 #endif /* CONFIG_NO_HZ_FULL */ 966 967 /** 968 * rcu_nmi_enter - inform RCU of entry to NMI context 969 * 970 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and 971 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know 972 * that the CPU is active. This implementation permits nested NMIs, as 973 * long as the nesting level does not overflow an int. (You will probably 974 * run out of stack space first.) 
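 *
 * Worked example (added illustration): an NMI arriving while this CPU is
 * RCU-idle finds ->dynticks_nmi_nesting == 0, exits the extended quiescent
 * state, and adds only 1, leaving the value 1.  A nested NMI is not in an
 * extended quiescent state, so it adds 2, giving 3, and its matching
 * rcu_nmi_exit() subtracts 2.  The value 1 therefore uniquely identifies
 * the outermost NMI that interrupted an RCU-idle period.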
975 * 976 * If you add or remove a call to rcu_nmi_enter(), be sure to test 977 * with CONFIG_RCU_EQS_DEBUG=y. 978 */ 979 noinstr void rcu_nmi_enter(void) 980 { 981 long incby = 2; 982 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 983 984 /* Complain about underflow. */ 985 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); 986 987 /* 988 * If idle from RCU viewpoint, atomically increment ->dynticks 989 * to mark non-idle and increment ->dynticks_nmi_nesting by one. 990 * Otherwise, increment ->dynticks_nmi_nesting by two. This means 991 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed 992 * to be in the outermost NMI handler that interrupted an RCU-idle 993 * period (observation due to Andy Lutomirski). 994 */ 995 if (rcu_dynticks_curr_cpu_in_eqs()) { 996 997 if (!in_nmi()) 998 rcu_dynticks_task_exit(); 999 1000 // RCU is not watching here ... 1001 rcu_dynticks_eqs_exit(); 1002 // ... but is watching here. 1003 1004 if (!in_nmi()) { 1005 instrumentation_begin(); 1006 rcu_cleanup_after_idle(); 1007 instrumentation_end(); 1008 } 1009 1010 instrumentation_begin(); 1011 // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs() 1012 instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); 1013 // instrumentation for the noinstr rcu_dynticks_eqs_exit() 1014 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 1015 1016 incby = 1; 1017 } else if (!in_nmi()) { 1018 instrumentation_begin(); 1019 rcu_irq_enter_check_tick(); 1020 instrumentation_end(); 1021 } else { 1022 instrumentation_begin(); 1023 } 1024 1025 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), 1026 rdp->dynticks_nmi_nesting, 1027 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); 1028 instrumentation_end(); 1029 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ 1030 rdp->dynticks_nmi_nesting + incby); 1031 barrier(); 1032 } 1033 1034 /** 1035 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle 1036 * 1037 * Enter an interrupt handler, which might possibly result in exiting 1038 * idle mode, in other words, entering the mode in which read-side critical 1039 * sections can occur. The caller must have disabled interrupts. 1040 * 1041 * Note that the Linux kernel is fully capable of entering an interrupt 1042 * handler that it never exits, for example when doing upcalls to user mode! 1043 * This code assumes that the idle loop never does upcalls to user mode. 1044 * If your architecture's idle loop does do upcalls to user mode (or does 1045 * anything else that results in unbalanced calls to the irq_enter() and 1046 * irq_exit() functions), RCU will give you what you deserve, good and hard. 1047 * But very infrequently and irreproducibly. 1048 * 1049 * Use things like work queues to work around this limitation. 1050 * 1051 * You have been warned. 1052 * 1053 * If you add or remove a call to rcu_irq_enter(), be sure to test with 1054 * CONFIG_RCU_EQS_DEBUG=y. 1055 */ 1056 noinstr void rcu_irq_enter(void) 1057 { 1058 lockdep_assert_irqs_disabled(); 1059 rcu_nmi_enter(); 1060 } 1061 1062 /* 1063 * Wrapper for rcu_irq_enter() where interrupts are enabled. 1064 * 1065 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test 1066 * with CONFIG_RCU_EQS_DEBUG=y. 
1067 */ 1068 void rcu_irq_enter_irqson(void) 1069 { 1070 unsigned long flags; 1071 1072 local_irq_save(flags); 1073 rcu_irq_enter(); 1074 local_irq_restore(flags); 1075 } 1076 1077 /* 1078 * If any sort of urgency was applied to the current CPU (for example, 1079 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order 1080 * to get to a quiescent state, disable it. 1081 */ 1082 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) 1083 { 1084 raw_lockdep_assert_held_rcu_node(rdp->mynode); 1085 WRITE_ONCE(rdp->rcu_urgent_qs, false); 1086 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); 1087 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { 1088 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 1089 WRITE_ONCE(rdp->rcu_forced_tick, false); 1090 } 1091 } 1092 1093 noinstr bool __rcu_is_watching(void) 1094 { 1095 return !rcu_dynticks_curr_cpu_in_eqs(); 1096 } 1097 1098 /** 1099 * rcu_is_watching - see if RCU thinks that the current CPU is not idle 1100 * 1101 * Return true if RCU is watching the running CPU, which means that this 1102 * CPU can safely enter RCU read-side critical sections. In other words, 1103 * if the current CPU is not in its idle loop or is in an interrupt or 1104 * NMI handler, return true. 1105 */ 1106 bool rcu_is_watching(void) 1107 { 1108 bool ret; 1109 1110 preempt_disable_notrace(); 1111 ret = !rcu_dynticks_curr_cpu_in_eqs(); 1112 preempt_enable_notrace(); 1113 return ret; 1114 } 1115 EXPORT_SYMBOL_GPL(rcu_is_watching); 1116 1117 /* 1118 * If a holdout task is actually running, request an urgent quiescent 1119 * state from its CPU. This is unsynchronized, so migrations can cause 1120 * the request to go to the wrong CPU. Which is OK, all that will happen 1121 * is that the CPU's next context switch will be a bit slower and next 1122 * time around this task will generate another request. 1123 */ 1124 void rcu_request_urgent_qs_task(struct task_struct *t) 1125 { 1126 int cpu; 1127 1128 barrier(); 1129 cpu = task_cpu(t); 1130 if (!task_curr(t)) 1131 return; /* This task is not running on that CPU. */ 1132 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); 1133 } 1134 1135 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 1136 1137 /* 1138 * Is the current CPU online as far as RCU is concerned? 1139 * 1140 * Disable preemption to avoid false positives that could otherwise 1141 * happen due to the current CPU number being sampled, this task being 1142 * preempted, its old CPU being taken offline, resuming on some other CPU, 1143 * then determining that its old CPU is now offline. 1144 * 1145 * Disable checking if in an NMI handler because we cannot safely 1146 * report errors from NMI handlers anyway. In addition, it is OK to use 1147 * RCU on an offline processor during initial boot, hence the check for 1148 * rcu_scheduler_fully_active. 
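 *
 * Hypothetical usage sketch (added illustration, in the style of the
 * lockdep-RCU checks that invoke this function):
 *
 *	RCU_LOCKDEP_WARN(!rcu_lockdep_current_cpu_online(),
 *			 "RCU used illegally from an offline CPU!");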
1149 */ 1150 bool rcu_lockdep_current_cpu_online(void) 1151 { 1152 struct rcu_data *rdp; 1153 struct rcu_node *rnp; 1154 bool ret = false; 1155 1156 if (in_nmi() || !rcu_scheduler_fully_active) 1157 return true; 1158 preempt_disable_notrace(); 1159 rdp = this_cpu_ptr(&rcu_data); 1160 rnp = rdp->mynode; 1161 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) 1162 ret = true; 1163 preempt_enable_notrace(); 1164 return ret; 1165 } 1166 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 1167 1168 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 1169 1170 /* 1171 * We are reporting a quiescent state on behalf of some other CPU, so 1172 * it is our responsibility to check for and handle potential overflow 1173 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters. 1174 * After all, the CPU might be in deep idle state, and thus executing no 1175 * code whatsoever. 1176 */ 1177 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) 1178 { 1179 raw_lockdep_assert_held_rcu_node(rnp); 1180 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, 1181 rnp->gp_seq)) 1182 WRITE_ONCE(rdp->gpwrap, true); 1183 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) 1184 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; 1185 } 1186 1187 /* 1188 * Snapshot the specified CPU's dynticks counter so that we can later 1189 * credit them with an implicit quiescent state. Return 1 if this CPU 1190 * is in dynticks idle mode, which is an extended quiescent state. 1191 */ 1192 static int dyntick_save_progress_counter(struct rcu_data *rdp) 1193 { 1194 rdp->dynticks_snap = rcu_dynticks_snap(rdp); 1195 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { 1196 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 1197 rcu_gpnum_ovf(rdp->mynode, rdp); 1198 return 1; 1199 } 1200 return 0; 1201 } 1202 1203 /* 1204 * Return true if the specified CPU has passed through a quiescent 1205 * state by virtue of being in or having passed through an dynticks 1206 * idle state since the last call to dyntick_save_progress_counter() 1207 * for this same CPU, or by virtue of having been offline. 1208 */ 1209 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) 1210 { 1211 unsigned long jtsq; 1212 bool *rnhqp; 1213 bool *ruqp; 1214 struct rcu_node *rnp = rdp->mynode; 1215 1216 /* 1217 * If the CPU passed through or entered a dynticks idle phase with 1218 * no active irq/NMI handlers, then we can safely pretend that the CPU 1219 * already acknowledged the request to pass through a quiescent 1220 * state. Either way, that CPU cannot possibly be in an RCU 1221 * read-side critical section that started before the beginning 1222 * of the current RCU grace period. 1223 */ 1224 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { 1225 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 1226 rcu_gpnum_ovf(rnp, rdp); 1227 return 1; 1228 } 1229 1230 /* If waiting too long on an offline CPU, complain. */ 1231 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && 1232 time_after(jiffies, rcu_state.gp_start + HZ)) { 1233 bool onl; 1234 struct rcu_node *rnp1; 1235 1236 WARN_ON(1); /* Offline CPUs are supposed to report QS! 
*/ 1237 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", 1238 __func__, rnp->grplo, rnp->grphi, rnp->level, 1239 (long)rnp->gp_seq, (long)rnp->completedqs); 1240 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) 1241 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n", 1242 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask); 1243 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); 1244 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n", 1245 __func__, rdp->cpu, ".o"[onl], 1246 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, 1247 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); 1248 return 1; /* Break things loose after complaining. */ 1249 } 1250 1251 /* 1252 * A CPU running for an extended time within the kernel can 1253 * delay RCU grace periods: (1) At age jiffies_to_sched_qs, 1254 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set 1255 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the 1256 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs 1257 * variable are safe because the assignments are repeated if this 1258 * CPU failed to pass through a quiescent state. This code 1259 * also checks .jiffies_resched in case jiffies_to_sched_qs 1260 * is set way high. 1261 */ 1262 jtsq = READ_ONCE(jiffies_to_sched_qs); 1263 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); 1264 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); 1265 if (!READ_ONCE(*rnhqp) && 1266 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || 1267 time_after(jiffies, rcu_state.jiffies_resched) || 1268 rcu_state.cbovld)) { 1269 WRITE_ONCE(*rnhqp, true); 1270 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ 1271 smp_store_release(ruqp, true); 1272 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { 1273 WRITE_ONCE(*ruqp, true); 1274 } 1275 1276 /* 1277 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! 1278 * The above code handles this, but only for straight cond_resched(). 1279 * And some in-kernel loops check need_resched() before calling 1280 * cond_resched(), which defeats the above code for CPUs that are 1281 * running in-kernel with scheduling-clock interrupts disabled. 1282 * So hit them over the head with the resched_cpu() hammer! 1283 */ 1284 if (tick_nohz_full_cpu(rdp->cpu) && 1285 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || 1286 rcu_state.cbovld)) { 1287 WRITE_ONCE(*ruqp, true); 1288 resched_cpu(rdp->cpu); 1289 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1290 } 1291 1292 /* 1293 * If more than halfway to RCU CPU stall-warning time, invoke 1294 * resched_cpu() more frequently to try to loosen things up a bit. 1295 * Also check to see if the CPU is getting hammered with interrupts, 1296 * but only once per grace period, just to keep the IPIs down to 1297 * a dull roar. 
1298 */ 1299 if (time_after(jiffies, rcu_state.jiffies_resched)) { 1300 if (time_after(jiffies, 1301 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { 1302 resched_cpu(rdp->cpu); 1303 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1304 } 1305 if (IS_ENABLED(CONFIG_IRQ_WORK) && 1306 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && 1307 (rnp->ffmask & rdp->grpmask)) { 1308 init_irq_work(&rdp->rcu_iw, rcu_iw_handler); 1309 atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ); 1310 rdp->rcu_iw_pending = true; 1311 rdp->rcu_iw_gp_seq = rnp->gp_seq; 1312 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); 1313 } 1314 } 1315 1316 return 0; 1317 } 1318 1319 /* Trace-event wrapper function for trace_rcu_future_grace_period. */ 1320 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, 1321 unsigned long gp_seq_req, const char *s) 1322 { 1323 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 1324 gp_seq_req, rnp->level, 1325 rnp->grplo, rnp->grphi, s); 1326 } 1327 1328 /* 1329 * rcu_start_this_gp - Request the start of a particular grace period 1330 * @rnp_start: The leaf node of the CPU from which to start. 1331 * @rdp: The rcu_data corresponding to the CPU from which to start. 1332 * @gp_seq_req: The gp_seq of the grace period to start. 1333 * 1334 * Start the specified grace period, as needed to handle newly arrived 1335 * callbacks. The required future grace periods are recorded in each 1336 * rcu_node structure's ->gp_seq_needed field. Returns true if there 1337 * is reason to awaken the grace-period kthread. 1338 * 1339 * The caller must hold the specified rcu_node structure's ->lock, which 1340 * is why the caller is responsible for waking the grace-period kthread. 1341 * 1342 * Returns true if the GP thread needs to be awakened else false. 1343 */ 1344 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, 1345 unsigned long gp_seq_req) 1346 { 1347 bool ret = false; 1348 struct rcu_node *rnp; 1349 1350 /* 1351 * Use funnel locking to either acquire the root rcu_node 1352 * structure's lock or bail out if the need for this grace period 1353 * has already been recorded -- or if that grace period has in 1354 * fact already started. If there is already a grace period in 1355 * progress in a non-leaf node, no recording is needed because the 1356 * end of the grace period will scan the leaf rcu_node structures. 1357 * Note that rnp_start->lock must not be released. 1358 */ 1359 raw_lockdep_assert_held_rcu_node(rnp_start); 1360 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); 1361 for (rnp = rnp_start; 1; rnp = rnp->parent) { 1362 if (rnp != rnp_start) 1363 raw_spin_lock_rcu_node(rnp); 1364 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || 1365 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || 1366 (rnp != rnp_start && 1367 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { 1368 trace_rcu_this_gp(rnp, rdp, gp_seq_req, 1369 TPS("Prestarted")); 1370 goto unlock_out; 1371 } 1372 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); 1373 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { 1374 /* 1375 * We just marked the leaf or internal node, and a 1376 * grace period is in progress, which means that 1377 * rcu_gp_cleanup() will see the marking. Bail to 1378 * reduce contention. 1379 */ 1380 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, 1381 TPS("Startedleaf")); 1382 goto unlock_out; 1383 } 1384 if (rnp != rnp_start && rnp->parent != NULL) 1385 raw_spin_unlock_rcu_node(rnp); 1386 if (!rnp->parent) 1387 break; /* At root, and perhaps also leaf. 
*/ 1388 } 1389 1390 /* If GP already in progress, just leave, otherwise start one. */ 1391 if (rcu_gp_in_progress()) { 1392 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); 1393 goto unlock_out; 1394 } 1395 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); 1396 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT); 1397 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 1398 if (!READ_ONCE(rcu_state.gp_kthread)) { 1399 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); 1400 goto unlock_out; 1401 } 1402 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq")); 1403 ret = true; /* Caller must wake GP kthread. */ 1404 unlock_out: 1405 /* Push furthest requested GP to leaf node and rcu_data structure. */ 1406 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { 1407 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); 1408 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1409 } 1410 if (rnp != rnp_start) 1411 raw_spin_unlock_rcu_node(rnp); 1412 return ret; 1413 } 1414 1415 /* 1416 * Clean up any old requests for the just-ended grace period. Also return 1417 * whether any additional grace periods have been requested. 1418 */ 1419 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) 1420 { 1421 bool needmore; 1422 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1423 1424 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); 1425 if (!needmore) 1426 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ 1427 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, 1428 needmore ? TPS("CleanupMore") : TPS("Cleanup")); 1429 return needmore; 1430 } 1431 1432 /* 1433 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an 1434 * interrupt or softirq handler, in which case we just might immediately 1435 * sleep upon return, resulting in a grace-period hang), and don't bother 1436 * awakening when there is nothing for the grace-period kthread to do 1437 * (as in several CPUs raced to awaken, we lost), and finally don't try 1438 * to awaken a kthread that has not yet been created. If all those checks 1439 * are passed, track some debug information and awaken. 1440 * 1441 * So why do the self-wakeup when in an interrupt or softirq handler 1442 * in the grace-period kthread's context? Because the kthread might have 1443 * been interrupted just as it was going to sleep, and just after the final 1444 * pre-sleep check of the awaken condition. In this case, a wakeup really 1445 * is required, and is therefore supplied. 1446 */ 1447 static void rcu_gp_kthread_wake(void) 1448 { 1449 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread); 1450 1451 if ((current == t && !in_irq() && !in_serving_softirq()) || 1452 !READ_ONCE(rcu_state.gp_flags) || !t) 1453 return; 1454 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); 1455 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); 1456 swake_up_one(&rcu_state.gp_wq); 1457 } 1458 1459 /* 1460 * If there is room, assign a ->gp_seq number to any callbacks on this 1461 * CPU that have not already been assigned. Also accelerate any callbacks 1462 * that were previously assigned a ->gp_seq number that has since proven 1463 * to be too conservative, which can happen if callbacks get assigned a 1464 * ->gp_seq number while RCU is idle, but with reference to a non-root 1465 * rcu_node structure. This function is idempotent, so it does not hurt 1466 * to call it repeatedly. Returns an flag saying that we should awaken 1467 * the RCU grace-period kthread. 
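 *
 * Typical calling pattern (illustrative sketch, modeled on
 * rcu_accelerate_cbs_unlocked() below):
 *
 *	raw_spin_lock_rcu_node(rnp);		// irqs already disabled
 *	needwake = rcu_accelerate_cbs(rnp, rdp);
 *	raw_spin_unlock_rcu_node(rnp);
 *	if (needwake)
 *		rcu_gp_kthread_wake();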
1468 * 1469 * The caller must hold rnp->lock with interrupts disabled. 1470 */ 1471 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1472 { 1473 unsigned long gp_seq_req; 1474 bool ret = false; 1475 1476 rcu_lockdep_assert_cblist_protected(rdp); 1477 raw_lockdep_assert_held_rcu_node(rnp); 1478 1479 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1480 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1481 return false; 1482 1483 /* 1484 * Callbacks are often registered with incomplete grace-period 1485 * information. Something about the fact that getting exact 1486 * information requires acquiring a global lock... RCU therefore 1487 * makes a conservative estimate of the grace period number at which 1488 * a given callback will become ready to invoke. The following 1489 * code checks this estimate and improves it when possible, thus 1490 * accelerating callback invocation to an earlier grace-period 1491 * number. 1492 */ 1493 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); 1494 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) 1495 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); 1496 1497 /* Trace depending on how much we were able to accelerate. */ 1498 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) 1499 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB")); 1500 else 1501 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB")); 1502 return ret; 1503 } 1504 1505 /* 1506 * Similar to rcu_accelerate_cbs(), but does not require that the leaf 1507 * rcu_node structure's ->lock be held. It consults the cached value 1508 * of ->gp_seq_needed in the rcu_data structure, and if that indicates 1509 * that a new grace-period request be made, invokes rcu_accelerate_cbs() 1510 * while holding the leaf rcu_node structure's ->lock. 1511 */ 1512 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, 1513 struct rcu_data *rdp) 1514 { 1515 unsigned long c; 1516 bool needwake; 1517 1518 rcu_lockdep_assert_cblist_protected(rdp); 1519 c = rcu_seq_snap(&rcu_state.gp_seq); 1520 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1521 /* Old request still live, so mark recent callbacks. */ 1522 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1523 return; 1524 } 1525 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1526 needwake = rcu_accelerate_cbs(rnp, rdp); 1527 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1528 if (needwake) 1529 rcu_gp_kthread_wake(); 1530 } 1531 1532 /* 1533 * Move any callbacks whose grace period has completed to the 1534 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1535 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1536 * sublist. This function is idempotent, so it does not hurt to 1537 * invoke it repeatedly. As long as it is not invoked -too- often... 1538 * Returns true if the RCU grace-period kthread needs to be awakened. 1539 * 1540 * The caller must hold rnp->lock with interrupts disabled. 1541 */ 1542 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1543 { 1544 rcu_lockdep_assert_cblist_protected(rdp); 1545 raw_lockdep_assert_held_rcu_node(rnp); 1546 1547 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1548 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1549 return false; 1550 1551 /* 1552 * Find all callbacks whose ->gp_seq numbers indicate that they 1553 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 
1554 */ 1555 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1556 1557 /* Classify any remaining callbacks. */ 1558 return rcu_accelerate_cbs(rnp, rdp); 1559 } 1560 1561 /* 1562 * Move and classify callbacks, but only if doing so won't require 1563 * that the RCU grace-period kthread be awakened. 1564 */ 1565 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1566 struct rcu_data *rdp) 1567 { 1568 rcu_lockdep_assert_cblist_protected(rdp); 1569 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || 1570 !raw_spin_trylock_rcu_node(rnp)) 1571 return; 1572 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1573 raw_spin_unlock_rcu_node(rnp); 1574 } 1575 1576 /* 1577 * Update CPU-local rcu_data state to record the beginnings and ends of 1578 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1579 * structure corresponding to the current CPU, and must have irqs disabled. 1580 * Returns true if the grace-period kthread needs to be awakened. 1581 */ 1582 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1583 { 1584 bool ret = false; 1585 bool need_qs; 1586 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 1587 rcu_segcblist_is_offloaded(&rdp->cblist); 1588 1589 raw_lockdep_assert_held_rcu_node(rnp); 1590 1591 if (rdp->gp_seq == rnp->gp_seq) 1592 return false; /* Nothing to do. */ 1593 1594 /* Handle the ends of any preceding grace periods first. */ 1595 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1596 unlikely(READ_ONCE(rdp->gpwrap))) { 1597 if (!offloaded) 1598 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ 1599 rdp->core_needs_qs = false; 1600 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1601 } else { 1602 if (!offloaded) 1603 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ 1604 if (rdp->core_needs_qs) 1605 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); 1606 } 1607 1608 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ 1609 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1610 unlikely(READ_ONCE(rdp->gpwrap))) { 1611 /* 1612 * If the current grace period is waiting for this CPU, 1613 * set up to detect a quiescent state, otherwise don't 1614 * go looking for one. 1615 */ 1616 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1617 need_qs = !!(rnp->qsmask & rdp->grpmask); 1618 rdp->cpu_no_qs.b.norm = need_qs; 1619 rdp->core_needs_qs = need_qs; 1620 zero_cpu_stall_ticks(rdp); 1621 } 1622 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1623 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1624 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1625 WRITE_ONCE(rdp->gpwrap, false); 1626 rcu_gpnum_ovf(rnp, rdp); 1627 return ret; 1628 } 1629 1630 static void note_gp_changes(struct rcu_data *rdp) 1631 { 1632 unsigned long flags; 1633 bool needwake; 1634 struct rcu_node *rnp; 1635 1636 local_irq_save(flags); 1637 rnp = rdp->mynode; 1638 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1639 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1640 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. 
*/ 1641 local_irq_restore(flags); 1642 return; 1643 } 1644 needwake = __note_gp_changes(rnp, rdp); 1645 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1646 if (needwake) 1647 rcu_gp_kthread_wake(); 1648 } 1649 1650 static void rcu_gp_slow(int delay) 1651 { 1652 if (delay > 0 && 1653 !(rcu_seq_ctr(rcu_state.gp_seq) % 1654 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1655 schedule_timeout_idle(delay); 1656 } 1657 1658 static unsigned long sleep_duration; 1659 1660 /* Allow rcutorture to stall the grace-period kthread. */ 1661 void rcu_gp_set_torture_wait(int duration) 1662 { 1663 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0) 1664 WRITE_ONCE(sleep_duration, duration); 1665 } 1666 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait); 1667 1668 /* Actually implement the aforementioned wait. */ 1669 static void rcu_gp_torture_wait(void) 1670 { 1671 unsigned long duration; 1672 1673 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST)) 1674 return; 1675 duration = xchg(&sleep_duration, 0UL); 1676 if (duration > 0) { 1677 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration); 1678 schedule_timeout_idle(duration); 1679 pr_alert("%s: Wait complete\n", __func__); 1680 } 1681 } 1682 1683 /* 1684 * Initialize a new grace period. Return false if no grace period required. 1685 */ 1686 static bool rcu_gp_init(void) 1687 { 1688 unsigned long flags; 1689 unsigned long oldmask; 1690 unsigned long mask; 1691 struct rcu_data *rdp; 1692 struct rcu_node *rnp = rcu_get_root(); 1693 1694 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1695 raw_spin_lock_irq_rcu_node(rnp); 1696 if (!READ_ONCE(rcu_state.gp_flags)) { 1697 /* Spurious wakeup, tell caller to go back to sleep. */ 1698 raw_spin_unlock_irq_rcu_node(rnp); 1699 return false; 1700 } 1701 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ 1702 1703 if (WARN_ON_ONCE(rcu_gp_in_progress())) { 1704 /* 1705 * Grace period already in progress, don't start another. 1706 * Not supposed to be able to happen. 1707 */ 1708 raw_spin_unlock_irq_rcu_node(rnp); 1709 return false; 1710 } 1711 1712 /* Advance to a new grace period and initialize state. */ 1713 record_gp_stall_check_time(); 1714 /* Record GP times before starting GP, hence rcu_seq_start(). */ 1715 rcu_seq_start(&rcu_state.gp_seq); 1716 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 1717 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); 1718 raw_spin_unlock_irq_rcu_node(rnp); 1719 1720 /* 1721 * Apply per-leaf buffered online and offline operations to the 1722 * rcu_node tree. Note that this new grace period need not wait 1723 * for subsequent online CPUs, and that quiescent-state forcing 1724 * will handle subsequent offline CPUs. 1725 */ 1726 rcu_state.gp_state = RCU_GP_ONOFF; 1727 rcu_for_each_leaf_node(rnp) { 1728 raw_spin_lock(&rcu_state.ofl_lock); 1729 raw_spin_lock_irq_rcu_node(rnp); 1730 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1731 !rnp->wait_blkd_tasks) { 1732 /* Nothing to do on this leaf rcu_node structure. */ 1733 raw_spin_unlock_irq_rcu_node(rnp); 1734 raw_spin_unlock(&rcu_state.ofl_lock); 1735 continue; 1736 } 1737 1738 /* Record old state, apply changes to ->qsmaskinit field. */ 1739 oldmask = rnp->qsmaskinit; 1740 rnp->qsmaskinit = rnp->qsmaskinitnext; 1741 1742 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1743 if (!oldmask != !rnp->qsmaskinit) { 1744 if (!oldmask) { /* First online CPU for rcu_node. */ 1745 if (!rnp->wait_blkd_tasks) /* Ever offline? 
*/ 1746 rcu_init_new_rnp(rnp); 1747 } else if (rcu_preempt_has_tasks(rnp)) { 1748 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1749 } else { /* Last offline CPU and can propagate. */ 1750 rcu_cleanup_dead_rnp(rnp); 1751 } 1752 } 1753 1754 /* 1755 * If all waited-on tasks from prior grace period are 1756 * done, and if all this rcu_node structure's CPUs are 1757 * still offline, propagate up the rcu_node tree and 1758 * clear ->wait_blkd_tasks. Otherwise, if one of this 1759 * rcu_node structure's CPUs has since come back online, 1760 * simply clear ->wait_blkd_tasks. 1761 */ 1762 if (rnp->wait_blkd_tasks && 1763 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1764 rnp->wait_blkd_tasks = false; 1765 if (!rnp->qsmaskinit) 1766 rcu_cleanup_dead_rnp(rnp); 1767 } 1768 1769 raw_spin_unlock_irq_rcu_node(rnp); 1770 raw_spin_unlock(&rcu_state.ofl_lock); 1771 } 1772 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1773 1774 /* 1775 * Set the quiescent-state-needed bits in all the rcu_node 1776 * structures for all currently online CPUs in breadth-first 1777 * order, starting from the root rcu_node structure, relying on the 1778 * layout of the tree within the rcu_state.node[] array. Note that 1779 * other CPUs will access only the leaves of the hierarchy, thus 1780 * seeing that no grace period is in progress, at least until the 1781 * corresponding leaf node has been initialized. 1782 * 1783 * The grace period cannot complete until the initialization 1784 * process finishes, because this kthread handles both. 1785 */ 1786 rcu_state.gp_state = RCU_GP_INIT; 1787 rcu_for_each_node_breadth_first(rnp) { 1788 rcu_gp_slow(gp_init_delay); 1789 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1790 rdp = this_cpu_ptr(&rcu_data); 1791 rcu_preempt_check_blocked_tasks(rnp); 1792 rnp->qsmask = rnp->qsmaskinit; 1793 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1794 if (rnp == rdp->mynode) 1795 (void)__note_gp_changes(rnp, rdp); 1796 rcu_preempt_boost_start_gp(rnp); 1797 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1798 rnp->level, rnp->grplo, 1799 rnp->grphi, rnp->qsmask); 1800 /* Quiescent states for tasks on any now-offline CPUs. */ 1801 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1802 rnp->rcu_gp_init_mask = mask; 1803 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1804 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1805 else 1806 raw_spin_unlock_irq_rcu_node(rnp); 1807 cond_resched_tasks_rcu_qs(); 1808 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1809 } 1810 1811 return true; 1812 } 1813 1814 /* 1815 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1816 * time. 1817 */ 1818 static bool rcu_gp_fqs_check_wake(int *gfp) 1819 { 1820 struct rcu_node *rnp = rcu_get_root(); 1821 1822 // If under overload conditions, force an immediate FQS scan. 1823 if (*gfp & RCU_GP_FLAG_OVLD) 1824 return true; 1825 1826 // Someone like call_rcu() requested a force-quiescent-state scan. 1827 *gfp = READ_ONCE(rcu_state.gp_flags); 1828 if (*gfp & RCU_GP_FLAG_FQS) 1829 return true; 1830 1831 // The current grace period has completed. 1832 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1833 return true; 1834 1835 return false; 1836 } 1837 1838 /* 1839 * Do one round of quiescent-state forcing. 1840 */ 1841 static void rcu_gp_fqs(bool first_time) 1842 { 1843 struct rcu_node *rnp = rcu_get_root(); 1844 1845 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1846 rcu_state.n_force_qs++; 1847 if (first_time) { 1848 /* Collect dyntick-idle snapshots. 
*/ 1849 force_qs_rnp(dyntick_save_progress_counter); 1850 } else { 1851 /* Handle dyntick-idle and offline CPUs. */ 1852 force_qs_rnp(rcu_implicit_dynticks_qs); 1853 } 1854 /* Clear flag to prevent immediate re-entry. */ 1855 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 1856 raw_spin_lock_irq_rcu_node(rnp); 1857 WRITE_ONCE(rcu_state.gp_flags, 1858 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); 1859 raw_spin_unlock_irq_rcu_node(rnp); 1860 } 1861 } 1862 1863 /* 1864 * Loop doing repeated quiescent-state forcing until the grace period ends. 1865 */ 1866 static void rcu_gp_fqs_loop(void) 1867 { 1868 bool first_gp_fqs; 1869 int gf = 0; 1870 unsigned long j; 1871 int ret; 1872 struct rcu_node *rnp = rcu_get_root(); 1873 1874 first_gp_fqs = true; 1875 j = READ_ONCE(jiffies_till_first_fqs); 1876 if (rcu_state.cbovld) 1877 gf = RCU_GP_FLAG_OVLD; 1878 ret = 0; 1879 for (;;) { 1880 if (!ret) { 1881 rcu_state.jiffies_force_qs = jiffies + j; 1882 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1883 jiffies + (j ? 3 * j : 2)); 1884 } 1885 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1886 TPS("fqswait")); 1887 rcu_state.gp_state = RCU_GP_WAIT_FQS; 1888 ret = swait_event_idle_timeout_exclusive( 1889 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); 1890 rcu_gp_torture_wait(); 1891 rcu_state.gp_state = RCU_GP_DOING_FQS; 1892 /* Locking provides needed memory barriers. */ 1893 /* If grace period done, leave loop. */ 1894 if (!READ_ONCE(rnp->qsmask) && 1895 !rcu_preempt_blocked_readers_cgp(rnp)) 1896 break; 1897 /* If time for quiescent-state forcing, do it. */ 1898 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || 1899 (gf & RCU_GP_FLAG_FQS)) { 1900 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1901 TPS("fqsstart")); 1902 rcu_gp_fqs(first_gp_fqs); 1903 gf = 0; 1904 if (first_gp_fqs) { 1905 first_gp_fqs = false; 1906 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; 1907 } 1908 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1909 TPS("fqsend")); 1910 cond_resched_tasks_rcu_qs(); 1911 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1912 ret = 0; /* Force full wait till next FQS. */ 1913 j = READ_ONCE(jiffies_till_next_fqs); 1914 } else { 1915 /* Deal with stray signal. */ 1916 cond_resched_tasks_rcu_qs(); 1917 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1918 WARN_ON(signal_pending(current)); 1919 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1920 TPS("fqswaitsig")); 1921 ret = 1; /* Keep old FQS timing. */ 1922 j = jiffies; 1923 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 1924 j = 1; 1925 else 1926 j = rcu_state.jiffies_force_qs - j; 1927 gf = 0; 1928 } 1929 } 1930 } 1931 1932 /* 1933 * Clean up after the old grace period. 1934 */ 1935 static void rcu_gp_cleanup(void) 1936 { 1937 int cpu; 1938 bool needgp = false; 1939 unsigned long gp_duration; 1940 unsigned long new_gp_seq; 1941 bool offloaded; 1942 struct rcu_data *rdp; 1943 struct rcu_node *rnp = rcu_get_root(); 1944 struct swait_queue_head *sq; 1945 1946 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1947 raw_spin_lock_irq_rcu_node(rnp); 1948 rcu_state.gp_end = jiffies; 1949 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 1950 if (gp_duration > rcu_state.gp_max) 1951 rcu_state.gp_max = gp_duration; 1952 1953 /* 1954 * We know the grace period is complete, but to everyone else 1955 * it appears to still be ongoing. But it is also the case 1956 * that to everyone else it looks like there is nothing that 1957 * they can do to advance the grace period. 
It is therefore 1958 * safe for us to drop the lock in order to mark the grace 1959 * period as completed in all of the rcu_node structures. 1960 */ 1961 raw_spin_unlock_irq_rcu_node(rnp); 1962 1963 /* 1964 * Propagate new ->gp_seq value to rcu_node structures so that 1965 * other CPUs don't have to wait until the start of the next grace 1966 * period to process their callbacks. This also avoids some nasty 1967 * RCU grace-period initialization races by forcing the end of 1968 * the current grace period to be completely recorded in all of 1969 * the rcu_node structures before the beginning of the next grace 1970 * period is recorded in any of the rcu_node structures. 1971 */ 1972 new_gp_seq = rcu_state.gp_seq; 1973 rcu_seq_end(&new_gp_seq); 1974 rcu_for_each_node_breadth_first(rnp) { 1975 raw_spin_lock_irq_rcu_node(rnp); 1976 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 1977 dump_blkd_tasks(rnp, 10); 1978 WARN_ON_ONCE(rnp->qsmask); 1979 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 1980 rdp = this_cpu_ptr(&rcu_data); 1981 if (rnp == rdp->mynode) 1982 needgp = __note_gp_changes(rnp, rdp) || needgp; 1983 /* smp_mb() provided by prior unlock-lock pair. */ 1984 needgp = rcu_future_gp_cleanup(rnp) || needgp; 1985 // Reset overload indication for CPUs no longer overloaded 1986 if (rcu_is_leaf_node(rnp)) 1987 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { 1988 rdp = per_cpu_ptr(&rcu_data, cpu); 1989 check_cb_ovld_locked(rdp, rnp); 1990 } 1991 sq = rcu_nocb_gp_get(rnp); 1992 raw_spin_unlock_irq_rcu_node(rnp); 1993 rcu_nocb_gp_cleanup(sq); 1994 cond_resched_tasks_rcu_qs(); 1995 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1996 rcu_gp_slow(gp_cleanup_delay); 1997 } 1998 rnp = rcu_get_root(); 1999 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 2000 2001 /* Declare grace period done, trace first to use old GP number. */ 2002 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 2003 rcu_seq_end(&rcu_state.gp_seq); 2004 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 2005 rcu_state.gp_state = RCU_GP_IDLE; 2006 /* Check for GP requests since above loop. */ 2007 rdp = this_cpu_ptr(&rcu_data); 2008 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 2009 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 2010 TPS("CleanupMore")); 2011 needgp = true; 2012 } 2013 /* Advance CBs to reduce false positives below. */ 2014 offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2015 rcu_segcblist_is_offloaded(&rdp->cblist); 2016 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { 2017 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 2018 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 2019 trace_rcu_grace_period(rcu_state.name, 2020 rcu_state.gp_seq, 2021 TPS("newreq")); 2022 } else { 2023 WRITE_ONCE(rcu_state.gp_flags, 2024 rcu_state.gp_flags & RCU_GP_FLAG_INIT); 2025 } 2026 raw_spin_unlock_irq_rcu_node(rnp); 2027 } 2028 2029 /* 2030 * Body of kthread that handles grace periods. 2031 */ 2032 static int __noreturn rcu_gp_kthread(void *unused) 2033 { 2034 rcu_bind_gp_kthread(); 2035 for (;;) { 2036 2037 /* Handle grace-period start. */ 2038 for (;;) { 2039 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2040 TPS("reqwait")); 2041 rcu_state.gp_state = RCU_GP_WAIT_GPS; 2042 swait_event_idle_exclusive(rcu_state.gp_wq, 2043 READ_ONCE(rcu_state.gp_flags) & 2044 RCU_GP_FLAG_INIT); 2045 rcu_gp_torture_wait(); 2046 rcu_state.gp_state = RCU_GP_DONE_GPS; 2047 /* Locking provides needed memory barrier. 
*/ 2048 if (rcu_gp_init()) 2049 break; 2050 cond_resched_tasks_rcu_qs(); 2051 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2052 WARN_ON(signal_pending(current)); 2053 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2054 TPS("reqwaitsig")); 2055 } 2056 2057 /* Handle quiescent-state forcing. */ 2058 rcu_gp_fqs_loop(); 2059 2060 /* Handle grace-period end. */ 2061 rcu_state.gp_state = RCU_GP_CLEANUP; 2062 rcu_gp_cleanup(); 2063 rcu_state.gp_state = RCU_GP_CLEANED; 2064 } 2065 } 2066 2067 /* 2068 * Report a full set of quiescent states to the rcu_state data structure. 2069 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 2070 * another grace period is required. Whether we wake the grace-period 2071 * kthread or it awakens itself for the next round of quiescent-state 2072 * forcing, that kthread will clean up after the just-completed grace 2073 * period. Note that the caller must hold rnp->lock, which is released 2074 * before return. 2075 */ 2076 static void rcu_report_qs_rsp(unsigned long flags) 2077 __releases(rcu_get_root()->lock) 2078 { 2079 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 2080 WARN_ON_ONCE(!rcu_gp_in_progress()); 2081 WRITE_ONCE(rcu_state.gp_flags, 2082 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2083 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 2084 rcu_gp_kthread_wake(); 2085 } 2086 2087 /* 2088 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2089 * Allows quiescent states for a group of CPUs to be reported at one go 2090 * to the specified rcu_node structure, though all the CPUs in the group 2091 * must be represented by the same rcu_node structure (which need not be a 2092 * leaf rcu_node structure, though it often will be). The gps parameter 2093 * is the grace-period snapshot, which means that the quiescent states 2094 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 2095 * must be held upon entry, and it is released before return. 2096 * 2097 * As a special case, if mask is zero, the bit-already-cleared check is 2098 * disabled. This allows propagating quiescent state due to resumed tasks 2099 * during grace-period initialization. 2100 */ 2101 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 2102 unsigned long gps, unsigned long flags) 2103 __releases(rnp->lock) 2104 { 2105 unsigned long oldmask = 0; 2106 struct rcu_node *rnp_c; 2107 2108 raw_lockdep_assert_held_rcu_node(rnp); 2109 2110 /* Walk up the rcu_node hierarchy. */ 2111 for (;;) { 2112 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 2113 2114 /* 2115 * Our bit has already been cleared, or the 2116 * relevant grace period is already over, so done. 2117 */ 2118 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2119 return; 2120 } 2121 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2122 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 2123 rcu_preempt_blocked_readers_cgp(rnp)); 2124 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 2125 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 2126 mask, rnp->qsmask, rnp->level, 2127 rnp->grplo, rnp->grphi, 2128 !!rnp->gp_tasks); 2129 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2130 2131 /* Other bits still set at this level, so done. */ 2132 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2133 return; 2134 } 2135 rnp->completedqs = rnp->gp_seq; 2136 mask = rnp->grpmask; 2137 if (rnp->parent == NULL) { 2138 2139 /* No more levels. Exit loop holding root lock. 
*/ 2140 2141 break; 2142 } 2143 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2144 rnp_c = rnp; 2145 rnp = rnp->parent; 2146 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2147 oldmask = READ_ONCE(rnp_c->qsmask); 2148 } 2149 2150 /* 2151 * Get here if we are the last CPU to pass through a quiescent 2152 * state for this grace period. Invoke rcu_report_qs_rsp() 2153 * to clean up and start the next grace period if one is needed. 2154 */ 2155 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 2156 } 2157 2158 /* 2159 * Record a quiescent state for all tasks that were previously queued 2160 * on the specified rcu_node structure and that were blocking the current 2161 * RCU grace period. The caller must hold the corresponding rnp->lock with 2162 * irqs disabled, and this lock is released upon return, but irqs remain 2163 * disabled. 2164 */ 2165 static void __maybe_unused 2166 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 2167 __releases(rnp->lock) 2168 { 2169 unsigned long gps; 2170 unsigned long mask; 2171 struct rcu_node *rnp_p; 2172 2173 raw_lockdep_assert_held_rcu_node(rnp); 2174 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 2175 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 2176 rnp->qsmask != 0) { 2177 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2178 return; /* Still need more quiescent states! */ 2179 } 2180 2181 rnp->completedqs = rnp->gp_seq; 2182 rnp_p = rnp->parent; 2183 if (rnp_p == NULL) { 2184 /* 2185 * Only one rcu_node structure in the tree, so don't 2186 * try to report up to its nonexistent parent! 2187 */ 2188 rcu_report_qs_rsp(flags); 2189 return; 2190 } 2191 2192 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 2193 gps = rnp->gp_seq; 2194 mask = rnp->grpmask; 2195 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2196 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2197 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 2198 } 2199 2200 /* 2201 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2202 * structure. This must be called from the specified CPU. 2203 */ 2204 static void 2205 rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) 2206 { 2207 unsigned long flags; 2208 unsigned long mask; 2209 bool needwake = false; 2210 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2211 rcu_segcblist_is_offloaded(&rdp->cblist); 2212 struct rcu_node *rnp; 2213 2214 rnp = rdp->mynode; 2215 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2216 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2217 rdp->gpwrap) { 2218 2219 /* 2220 * The grace period in which this quiescent state was 2221 * recorded has ended, so don't report it upwards. 2222 * We will instead need a new quiescent state that lies 2223 * within the current grace period. 2224 */ 2225 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2226 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2227 return; 2228 } 2229 mask = rdp->grpmask; 2230 if (rdp->cpu == smp_processor_id()) 2231 rdp->core_needs_qs = false; 2232 if ((rnp->qsmask & mask) == 0) { 2233 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2234 } else { 2235 /* 2236 * This GP can't end until cpu checks in, so all of our 2237 * callbacks can be processed during the next GP. 
2238 */ 2239 if (!offloaded) 2240 needwake = rcu_accelerate_cbs(rnp, rdp); 2241 2242 rcu_disable_urgency_upon_qs(rdp); 2243 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2244 /* ^^^ Released rnp->lock */ 2245 if (needwake) 2246 rcu_gp_kthread_wake(); 2247 } 2248 } 2249 2250 /* 2251 * Check to see if there is a new grace period of which this CPU 2252 * is not yet aware, and if so, set up local rcu_data state for it. 2253 * Otherwise, see if this CPU has just passed through its first 2254 * quiescent state for this grace period, and record that fact if so. 2255 */ 2256 static void 2257 rcu_check_quiescent_state(struct rcu_data *rdp) 2258 { 2259 /* Check for grace-period ends and beginnings. */ 2260 note_gp_changes(rdp); 2261 2262 /* 2263 * Does this CPU still need to do its part for current grace period? 2264 * If no, return and let the other CPUs do their part as well. 2265 */ 2266 if (!rdp->core_needs_qs) 2267 return; 2268 2269 /* 2270 * Was there a quiescent state since the beginning of the grace 2271 * period? If no, then exit and wait for the next call. 2272 */ 2273 if (rdp->cpu_no_qs.b.norm) 2274 return; 2275 2276 /* 2277 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2278 * judge of that). 2279 */ 2280 rcu_report_qs_rdp(rdp->cpu, rdp); 2281 } 2282 2283 /* 2284 * Near the end of the offline process. Trace the fact that this CPU 2285 * is going offline. 2286 */ 2287 int rcutree_dying_cpu(unsigned int cpu) 2288 { 2289 bool blkd; 2290 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 2291 struct rcu_node *rnp = rdp->mynode; 2292 2293 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2294 return 0; 2295 2296 blkd = !!(rnp->qsmask & rdp->grpmask); 2297 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 2298 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 2299 return 0; 2300 } 2301 2302 /* 2303 * All CPUs for the specified rcu_node structure have gone offline, 2304 * and all tasks that were preempted within an RCU read-side critical 2305 * section while running on one of those CPUs have since exited their RCU 2306 * read-side critical section. Some other CPU is reporting this fact with 2307 * the specified rcu_node structure's ->lock held and interrupts disabled. 2308 * This function therefore goes up the tree of rcu_node structures, 2309 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2310 * the leaf rcu_node structure's ->qsmaskinit field has already been 2311 * updated. 2312 * 2313 * This function does check that the specified rcu_node structure has 2314 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2315 * prematurely. That said, invoking it after the fact will cost you 2316 * a needless lock acquisition. So once it has done its work, don't 2317 * invoke it again. 2318 */ 2319 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2320 { 2321 long mask; 2322 struct rcu_node *rnp = rnp_leaf; 2323 2324 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2325 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2326 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 2327 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 2328 return; 2329 for (;;) { 2330 mask = rnp->grpmask; 2331 rnp = rnp->parent; 2332 if (!rnp) 2333 break; 2334 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2335 rnp->qsmaskinit &= ~mask; 2336 /* Between grace periods, so better already be zero! */ 2337 WARN_ON_ONCE(rnp->qsmask); 2338 if (rnp->qsmaskinit) { 2339 raw_spin_unlock_rcu_node(rnp); 2340 /* irqs remain disabled. 
*/ 2341 return; 2342 } 2343 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2344 } 2345 } 2346 2347 /* 2348 * The CPU has been completely removed, and some other CPU is reporting 2349 * this fact from process context. Do the remainder of the cleanup. 2350 * There can only be one CPU hotplug operation at a time, so no need for 2351 * explicit locking. 2352 */ 2353 int rcutree_dead_cpu(unsigned int cpu) 2354 { 2355 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2356 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2357 2358 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2359 return 0; 2360 2361 /* Adjust any no-longer-needed kthreads. */ 2362 rcu_boost_kthread_setaffinity(rnp, -1); 2363 /* Do any needed no-CB deferred wakeups from this CPU. */ 2364 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); 2365 2366 // Stop-machine done, so allow nohz_full to disable tick. 2367 tick_dep_clear(TICK_DEP_BIT_RCU); 2368 return 0; 2369 } 2370 2371 /* 2372 * Invoke any RCU callbacks that have made it to the end of their grace 2373 * period. Throttle as specified by rdp->blimit. 2374 */ 2375 static void rcu_do_batch(struct rcu_data *rdp) 2376 { 2377 unsigned long flags; 2378 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2379 rcu_segcblist_is_offloaded(&rdp->cblist); 2380 struct rcu_head *rhp; 2381 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2382 long bl, count; 2383 long pending, tlimit = 0; 2384 2385 /* If no callbacks are ready, just return. */ 2386 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2387 trace_rcu_batch_start(rcu_state.name, 2388 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2389 trace_rcu_batch_end(rcu_state.name, 0, 2390 !rcu_segcblist_empty(&rdp->cblist), 2391 need_resched(), is_idle_task(current), 2392 rcu_is_callbacks_kthread()); 2393 return; 2394 } 2395 2396 /* 2397 * Extract the list of ready callbacks, disabling interrupts to prevent 2398 * races with call_rcu() from interrupt handlers. Leave the 2399 * callback counts, as rcu_barrier() needs to be conservative. 2400 */ 2401 local_irq_save(flags); 2402 rcu_nocb_lock(rdp); 2403 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2404 pending = rcu_segcblist_n_cbs(&rdp->cblist); 2405 bl = max(rdp->blimit, pending >> rcu_divisor); 2406 if (unlikely(bl > 100)) 2407 tlimit = local_clock() + rcu_resched_ns; 2408 trace_rcu_batch_start(rcu_state.name, 2409 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2410 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2411 if (offloaded) 2412 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2413 rcu_nocb_unlock_irqrestore(rdp, flags); 2414 2415 /* Invoke callbacks. */ 2416 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2417 rhp = rcu_cblist_dequeue(&rcl); 2418 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2419 rcu_callback_t f; 2420 2421 debug_rcu_head_unqueue(rhp); 2422 2423 rcu_lock_acquire(&rcu_callback_map); 2424 trace_rcu_invoke_callback(rcu_state.name, rhp); 2425 2426 f = rhp->func; 2427 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); 2428 f(rhp); 2429 2430 rcu_lock_release(&rcu_callback_map); 2431 2432 /* 2433 * Stop only if limit reached and CPU has something to do. 2434 * Note: The rcl structure counts down from zero. 2435 */ 2436 if (-rcl.len >= bl && !offloaded && 2437 (need_resched() || 2438 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2439 break; 2440 if (unlikely(tlimit)) { 2441 /* only call local_clock() every 32 callbacks */ 2442 if (likely((-rcl.len & 31) || local_clock() < tlimit)) 2443 continue; 2444 /* Exceeded the time limit, so leave.
*/ 2445 break; 2446 } 2447 if (offloaded) { 2448 WARN_ON_ONCE(in_serving_softirq()); 2449 local_bh_enable(); 2450 lockdep_assert_irqs_enabled(); 2451 cond_resched_tasks_rcu_qs(); 2452 lockdep_assert_irqs_enabled(); 2453 local_bh_disable(); 2454 } 2455 } 2456 2457 local_irq_save(flags); 2458 rcu_nocb_lock(rdp); 2459 count = -rcl.len; 2460 rdp->n_cbs_invoked += count; 2461 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2462 is_idle_task(current), rcu_is_callbacks_kthread()); 2463 2464 /* Update counts and requeue any remaining callbacks. */ 2465 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2466 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2467 rcu_segcblist_insert_count(&rdp->cblist, &rcl); 2468 2469 /* Reinstate batch limit if we have worked down the excess. */ 2470 count = rcu_segcblist_n_cbs(&rdp->cblist); 2471 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2472 rdp->blimit = blimit; 2473 2474 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2475 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2476 rdp->qlen_last_fqs_check = 0; 2477 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2478 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2479 rdp->qlen_last_fqs_check = count; 2480 2481 /* 2482 * The following usually indicates a double call_rcu(). To track 2483 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2484 */ 2485 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist)); 2486 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2487 count != 0 && rcu_segcblist_empty(&rdp->cblist)); 2488 2489 rcu_nocb_unlock_irqrestore(rdp, flags); 2490 2491 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2492 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) 2493 invoke_rcu_core(); 2494 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2495 } 2496 2497 /* 2498 * This function is invoked from each scheduling-clock interrupt, 2499 * and checks to see if this CPU is in a non-context-switch quiescent 2500 * state, for example, user mode or idle loop. It also schedules RCU 2501 * core processing. If the current grace period has gone on too long, 2502 * it will ask the scheduler to manufacture a context switch for the sole 2503 * purpose of providing the needed quiescent state. 2504 */ 2505 void rcu_sched_clock_irq(int user) 2506 { 2507 trace_rcu_utilization(TPS("Start scheduler-tick")); 2508 raw_cpu_inc(rcu_data.ticks_this_gp); 2509 /* The load-acquire pairs with the store-release setting to true. */ 2510 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2511 /* Idle and userspace execution already are quiescent states. */ 2512 if (!rcu_is_cpu_rrupt_from_idle() && !user) { 2513 set_tsk_need_resched(current); 2514 set_preempt_need_resched(); 2515 } 2516 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2517 } 2518 rcu_flavor_sched_clock_irq(user); 2519 if (rcu_pending(user)) 2520 invoke_rcu_core(); 2521 2522 trace_rcu_utilization(TPS("End scheduler-tick")); 2523 } 2524 2525 /* 2526 * Scan the leaf rcu_node structures. For each structure on which all 2527 * CPUs have reported a quiescent state and on which there are tasks 2528 * blocking the current grace period, initiate RCU priority boosting. 2529 * Otherwise, invoke the specified function to check dyntick state for 2530 * each CPU that has not yet reported a quiescent state.
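 *
 * (During a given grace period, the first such scan passes
 * dyntick_save_progress_counter() to snapshot each CPU's dyntick state,
 * and later scans pass rcu_implicit_dynticks_qs(), which can also report
 * quiescent states on behalf of idle and offline CPUs; see rcu_gp_fqs().)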
2531 */ 2532 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2533 { 2534 int cpu; 2535 unsigned long flags; 2536 unsigned long mask; 2537 struct rcu_data *rdp; 2538 struct rcu_node *rnp; 2539 2540 rcu_state.cbovld = rcu_state.cbovldnext; 2541 rcu_state.cbovldnext = false; 2542 rcu_for_each_leaf_node(rnp) { 2543 cond_resched_tasks_rcu_qs(); 2544 mask = 0; 2545 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2546 rcu_state.cbovldnext |= !!rnp->cbovldmask; 2547 if (rnp->qsmask == 0) { 2548 if (!IS_ENABLED(CONFIG_PREEMPT_RCU) || 2549 rcu_preempt_blocked_readers_cgp(rnp)) { 2550 /* 2551 * No point in scanning bits because they 2552 * are all zero. But we might need to 2553 * priority-boost blocked readers. 2554 */ 2555 rcu_initiate_boost(rnp, flags); 2556 /* rcu_initiate_boost() releases rnp->lock */ 2557 continue; 2558 } 2559 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2560 continue; 2561 } 2562 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { 2563 rdp = per_cpu_ptr(&rcu_data, cpu); 2564 if (f(rdp)) { 2565 mask |= rdp->grpmask; 2566 rcu_disable_urgency_upon_qs(rdp); 2567 } 2568 } 2569 if (mask != 0) { 2570 /* Idle/offline CPUs, report (releases rnp->lock). */ 2571 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2572 } else { 2573 /* Nothing to do here, so just drop the lock. */ 2574 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2575 } 2576 } 2577 } 2578 2579 /* 2580 * Force quiescent states on reluctant CPUs, and also detect which 2581 * CPUs are in dyntick-idle mode. 2582 */ 2583 void rcu_force_quiescent_state(void) 2584 { 2585 unsigned long flags; 2586 bool ret; 2587 struct rcu_node *rnp; 2588 struct rcu_node *rnp_old = NULL; 2589 2590 /* Funnel through hierarchy to reduce memory contention. */ 2591 rnp = __this_cpu_read(rcu_data.mynode); 2592 for (; rnp != NULL; rnp = rnp->parent) { 2593 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2594 !raw_spin_trylock(&rnp->fqslock); 2595 if (rnp_old != NULL) 2596 raw_spin_unlock(&rnp_old->fqslock); 2597 if (ret) 2598 return; 2599 rnp_old = rnp; 2600 } 2601 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2602 2603 /* Reached the root of the rcu_node tree, acquire lock. */ 2604 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2605 raw_spin_unlock(&rnp_old->fqslock); 2606 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2607 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2608 return; /* Someone beat us to it. */ 2609 } 2610 WRITE_ONCE(rcu_state.gp_flags, 2611 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2612 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2613 rcu_gp_kthread_wake(); 2614 } 2615 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2616 2617 /* Perform RCU core processing work for the current CPU. */ 2618 static __latent_entropy void rcu_core(void) 2619 { 2620 unsigned long flags; 2621 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2622 struct rcu_node *rnp = rdp->mynode; 2623 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2624 rcu_segcblist_is_offloaded(&rdp->cblist); 2625 2626 if (cpu_is_offline(smp_processor_id())) 2627 return; 2628 trace_rcu_utilization(TPS("Start RCU core")); 2629 WARN_ON_ONCE(!rdp->beenonline); 2630 2631 /* Report any deferred quiescent states if preemption enabled. */ 2632 if (!(preempt_count() & PREEMPT_MASK)) { 2633 rcu_preempt_deferred_qs(current); 2634 } else if (rcu_preempt_need_deferred_qs(current)) { 2635 set_tsk_need_resched(current); 2636 set_preempt_need_resched(); 2637 } 2638 2639 /* Update RCU state based on any recent quiescent states. 
*/ 2640 rcu_check_quiescent_state(rdp); 2641 2642 /* No grace period and unregistered callbacks? */ 2643 if (!rcu_gp_in_progress() && 2644 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { 2645 local_irq_save(flags); 2646 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2647 rcu_accelerate_cbs_unlocked(rnp, rdp); 2648 local_irq_restore(flags); 2649 } 2650 2651 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2652 2653 /* If there are callbacks ready, invoke them. */ 2654 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && 2655 likely(READ_ONCE(rcu_scheduler_fully_active))) 2656 rcu_do_batch(rdp); 2657 2658 /* Do any needed deferred wakeups of rcuo kthreads. */ 2659 do_nocb_deferred_wakeup(rdp); 2660 trace_rcu_utilization(TPS("End RCU core")); 2661 } 2662 2663 static void rcu_core_si(struct softirq_action *h) 2664 { 2665 rcu_core(); 2666 } 2667 2668 static void rcu_wake_cond(struct task_struct *t, int status) 2669 { 2670 /* 2671 * If the thread is yielding, only wake it when this 2672 * is invoked from idle 2673 */ 2674 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2675 wake_up_process(t); 2676 } 2677 2678 static void invoke_rcu_core_kthread(void) 2679 { 2680 struct task_struct *t; 2681 unsigned long flags; 2682 2683 local_irq_save(flags); 2684 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2685 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2686 if (t != NULL && t != current) 2687 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2688 local_irq_restore(flags); 2689 } 2690 2691 /* 2692 * Wake up this CPU's rcuc kthread to do RCU core processing. 2693 */ 2694 static void invoke_rcu_core(void) 2695 { 2696 if (!cpu_online(smp_processor_id())) 2697 return; 2698 if (use_softirq) 2699 raise_softirq(RCU_SOFTIRQ); 2700 else 2701 invoke_rcu_core_kthread(); 2702 } 2703 2704 static void rcu_cpu_kthread_park(unsigned int cpu) 2705 { 2706 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2707 } 2708 2709 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2710 { 2711 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2712 } 2713 2714 /* 2715 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2716 * the RCU softirq used in configurations of RCU that do not support RCU 2717 * priority boosting. 
2718 */ 2719 static void rcu_cpu_kthread(unsigned int cpu) 2720 { 2721 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2722 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2723 int spincnt; 2724 2725 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2726 for (spincnt = 0; spincnt < 10; spincnt++) { 2727 local_bh_disable(); 2728 *statusp = RCU_KTHREAD_RUNNING; 2729 local_irq_disable(); 2730 work = *workp; 2731 *workp = 0; 2732 local_irq_enable(); 2733 if (work) 2734 rcu_core(); 2735 local_bh_enable(); 2736 if (*workp == 0) { 2737 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2738 *statusp = RCU_KTHREAD_WAITING; 2739 return; 2740 } 2741 } 2742 *statusp = RCU_KTHREAD_YIELDING; 2743 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2744 schedule_timeout_idle(2); 2745 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2746 *statusp = RCU_KTHREAD_WAITING; 2747 } 2748 2749 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2750 .store = &rcu_data.rcu_cpu_kthread_task, 2751 .thread_should_run = rcu_cpu_kthread_should_run, 2752 .thread_fn = rcu_cpu_kthread, 2753 .thread_comm = "rcuc/%u", 2754 .setup = rcu_cpu_kthread_setup, 2755 .park = rcu_cpu_kthread_park, 2756 }; 2757 2758 /* 2759 * Spawn per-CPU RCU core processing kthreads. 2760 */ 2761 static int __init rcu_spawn_core_kthreads(void) 2762 { 2763 int cpu; 2764 2765 for_each_possible_cpu(cpu) 2766 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2767 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq) 2768 return 0; 2769 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2770 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2771 return 0; 2772 } 2773 early_initcall(rcu_spawn_core_kthreads); 2774 2775 /* 2776 * Handle any core-RCU processing required by a call_rcu() invocation. 2777 */ 2778 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2779 unsigned long flags) 2780 { 2781 /* 2782 * If called from an extended quiescent state, invoke the RCU 2783 * core in order to force a re-evaluation of RCU's idleness. 2784 */ 2785 if (!rcu_is_watching()) 2786 invoke_rcu_core(); 2787 2788 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2789 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2790 return; 2791 2792 /* 2793 * Force the grace period if too many callbacks or too long waiting. 2794 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 2795 * if some other CPU has recently done so. Also, don't bother 2796 * invoking rcu_force_quiescent_state() if the newly enqueued callback 2797 * is the only one waiting for a grace period to complete. 2798 */ 2799 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2800 rdp->qlen_last_fqs_check + qhimark)) { 2801 2802 /* Are we ignoring a completed grace period? */ 2803 note_gp_changes(rdp); 2804 2805 /* Start a new grace period if one not already started. */ 2806 if (!rcu_gp_in_progress()) { 2807 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 2808 } else { 2809 /* Give the grace period a kick. */ 2810 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 2811 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && 2812 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2813 rcu_force_quiescent_state(); 2814 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2815 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2816 } 2817 } 2818 } 2819 2820 /* 2821 * RCU callback function to leak a callback. 
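 * __call_rcu() stores this function in ->func when debug-objects reports
 * a probable double call_rcu(), so that the previously queued instance of
 * the callback leaks the memory instead of double-freeing it.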
2822 */ 2823 static void rcu_leak_callback(struct rcu_head *rhp) 2824 { 2825 } 2826 2827 /* 2828 * Check and if necessary update the leaf rcu_node structure's 2829 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2830 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 2831 * structure's ->lock. 2832 */ 2833 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 2834 { 2835 raw_lockdep_assert_held_rcu_node(rnp); 2836 if (qovld_calc <= 0) 2837 return; // Early boot and wildcard value set. 2838 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 2839 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 2840 else 2841 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 2842 } 2843 2844 /* 2845 * Check and if necessary update the leaf rcu_node structure's 2846 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2847 * number of queued RCU callbacks. No locks need be held, but the 2848 * caller must have disabled interrupts. 2849 * 2850 * Note that this function ignores the possibility that there are a lot 2851 * of callbacks all of which have already seen the end of their respective 2852 * grace periods. This omission is due to the need for no-CBs CPUs to 2853 * be holding ->nocb_lock to do this check, which is too heavy for a 2854 * common-case operation. 2855 */ 2856 static void check_cb_ovld(struct rcu_data *rdp) 2857 { 2858 struct rcu_node *const rnp = rdp->mynode; 2859 2860 if (qovld_calc <= 0 || 2861 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 2862 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 2863 return; // Early boot wildcard value or already set correctly. 2864 raw_spin_lock_rcu_node(rnp); 2865 check_cb_ovld_locked(rdp, rnp); 2866 raw_spin_unlock_rcu_node(rnp); 2867 } 2868 2869 /* Helper function for call_rcu() and friends. */ 2870 static void 2871 __call_rcu(struct rcu_head *head, rcu_callback_t func) 2872 { 2873 unsigned long flags; 2874 struct rcu_data *rdp; 2875 bool was_alldone; 2876 2877 /* Misaligned rcu_head! */ 2878 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 2879 2880 if (debug_rcu_head_queue(head)) { 2881 /* 2882 * Probable double call_rcu(), so leak the callback. 2883 * Use rcu:rcu_callback trace event to find the previous 2884 * time callback was passed to __call_rcu(). 2885 */ 2886 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", 2887 head, head->func); 2888 WRITE_ONCE(head->func, rcu_leak_callback); 2889 return; 2890 } 2891 head->func = func; 2892 head->next = NULL; 2893 local_irq_save(flags); 2894 kasan_record_aux_stack(head); 2895 rdp = this_cpu_ptr(&rcu_data); 2896 2897 /* Add the callback to our list. */ 2898 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 2899 // This can trigger due to call_rcu() from offline CPU: 2900 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 2901 WARN_ON_ONCE(!rcu_is_watching()); 2902 // Very early boot, before rcu_init(). Initialize if needed 2903 // and then drop through to queue the callback. 2904 if (rcu_segcblist_empty(&rdp->cblist)) 2905 rcu_segcblist_init(&rdp->cblist); 2906 } 2907 2908 check_cb_ovld(rdp); 2909 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) 2910 return; // Enqueued onto ->nocb_bypass, so just leave. 2911 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. 
2912 rcu_segcblist_enqueue(&rdp->cblist, head); 2913 if (__is_kvfree_rcu_offset((unsigned long)func)) 2914 trace_rcu_kvfree_callback(rcu_state.name, head, 2915 (unsigned long)func, 2916 rcu_segcblist_n_cbs(&rdp->cblist)); 2917 else 2918 trace_rcu_callback(rcu_state.name, head, 2919 rcu_segcblist_n_cbs(&rdp->cblist)); 2920 2921 /* Go handle any RCU core processing required. */ 2922 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2923 unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { 2924 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ 2925 } else { 2926 __call_rcu_core(rdp, head, flags); 2927 local_irq_restore(flags); 2928 } 2929 } 2930 2931 /** 2932 * call_rcu() - Queue an RCU callback for invocation after a grace period. 2933 * @head: structure to be used for queueing the RCU updates. 2934 * @func: actual callback function to be invoked after the grace period 2935 * 2936 * The callback function will be invoked some time after a full grace 2937 * period elapses, in other words after all pre-existing RCU read-side 2938 * critical sections have completed. However, the callback function 2939 * might well execute concurrently with RCU read-side critical sections 2940 * that started after call_rcu() was invoked. RCU read-side critical 2941 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and 2942 * may be nested. In addition, regions of code across which interrupts, 2943 * preemption, or softirqs have been disabled also serve as RCU read-side 2944 * critical sections. This includes hardware interrupt handlers, softirq 2945 * handlers, and NMI handlers. 2946 * 2947 * Note that all CPUs must agree that the grace period extended beyond 2948 * all pre-existing RCU read-side critical section. On systems with more 2949 * than one CPU, this means that when "func()" is invoked, each CPU is 2950 * guaranteed to have executed a full memory barrier since the end of its 2951 * last RCU read-side critical section whose beginning preceded the call 2952 * to call_rcu(). It also means that each CPU executing an RCU read-side 2953 * critical section that continues beyond the start of "func()" must have 2954 * executed a memory barrier after the call_rcu() but before the beginning 2955 * of that RCU read-side critical section. Note that these guarantees 2956 * include CPUs that are offline, idle, or executing in user mode, as 2957 * well as CPUs that are executing in the kernel. 2958 * 2959 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 2960 * resulting RCU callback function "func()", then both CPU A and CPU B are 2961 * guaranteed to execute a full memory barrier during the time interval 2962 * between the call to call_rcu() and the invocation of "func()" -- even 2963 * if CPU A and CPU B are the same CPU (but again only if the system has 2964 * more than one CPU). 2965 */ 2966 void call_rcu(struct rcu_head *head, rcu_callback_t func) 2967 { 2968 __call_rcu(head, func); 2969 } 2970 EXPORT_SYMBOL_GPL(call_rcu); 2971 2972 2973 /* Maximum number of jiffies to wait before draining a batch. 
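 * (HZ / 50 jiffies corresponds to roughly 20 milliseconds.)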
*/ 2974 #define KFREE_DRAIN_JIFFIES (HZ / 50) 2975 #define KFREE_N_BATCHES 2 2976 #define FREE_N_CHANNELS 2 2977 2978 /** 2979 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers 2980 * @nr_records: Number of active pointers in the array 2981 * @next: Next bulk object in the block chain 2982 * @records: Array of the kvfree_rcu() pointers 2983 */ 2984 struct kvfree_rcu_bulk_data { 2985 unsigned long nr_records; 2986 struct kvfree_rcu_bulk_data *next; 2987 void *records[]; 2988 }; 2989 2990 /* 2991 * This macro defines how many entries the "records" array 2992 * will contain. It is based on the fact that the size of 2993 * kvfree_rcu_bulk_data structure becomes exactly one page. 2994 */ 2995 #define KVFREE_BULK_MAX_ENTR \ 2996 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *)) 2997 2998 /** 2999 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests 3000 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period 3001 * @head_free: List of kfree_rcu() objects waiting for a grace period 3002 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period 3003 * @krcp: Pointer to @kfree_rcu_cpu structure 3004 */ 3005 3006 struct kfree_rcu_cpu_work { 3007 struct rcu_work rcu_work; 3008 struct rcu_head *head_free; 3009 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS]; 3010 struct kfree_rcu_cpu *krcp; 3011 }; 3012 3013 /** 3014 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period 3015 * @head: List of kfree_rcu() objects not yet waiting for a grace period 3016 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period 3017 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period 3018 * @lock: Synchronize access to this structure 3019 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES 3020 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending 3021 * @initialized: The @rcu_work fields have been initialized 3022 * @count: Number of objects for which GP not started 3023 * 3024 * This is a per-CPU structure. The reason that it is not included in 3025 * the rcu_data structure is to permit this code to be extracted from 3026 * the RCU files. Such extraction could allow further optimization of 3027 * the interactions with the slab allocators. 3028 */ 3029 struct kfree_rcu_cpu { 3030 struct rcu_head *head; 3031 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS]; 3032 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES]; 3033 raw_spinlock_t lock; 3034 struct delayed_work monitor_work; 3035 bool monitor_todo; 3036 bool initialized; 3037 int count; 3038 3039 /* 3040 * A simple cache list that contains objects for 3041 * reuse purpose. In order to save some per-cpu 3042 * space the list is singular. Even though it is 3043 * lockless an access has to be protected by the 3044 * per-cpu lock. 
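 * (See get_cached_bnode() and put_cached_bnode() below; at most
 * rcu_min_cached_objs pages are kept cached per CPU.)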
3045 */ 3046 struct llist_head bkvcache; 3047 int nr_bkv_objs; 3048 }; 3049 3050 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = { 3051 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock), 3052 }; 3053 3054 static __always_inline void 3055 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead) 3056 { 3057 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3058 int i; 3059 3060 for (i = 0; i < bhead->nr_records; i++) 3061 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i])); 3062 #endif 3063 } 3064 3065 static inline struct kfree_rcu_cpu * 3066 krc_this_cpu_lock(unsigned long *flags) 3067 { 3068 struct kfree_rcu_cpu *krcp; 3069 3070 local_irq_save(*flags); // For safely calling this_cpu_ptr(). 3071 krcp = this_cpu_ptr(&krc); 3072 raw_spin_lock(&krcp->lock); 3073 3074 return krcp; 3075 } 3076 3077 static inline void 3078 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) 3079 { 3080 raw_spin_unlock(&krcp->lock); 3081 local_irq_restore(flags); 3082 } 3083 3084 static inline struct kvfree_rcu_bulk_data * 3085 get_cached_bnode(struct kfree_rcu_cpu *krcp) 3086 { 3087 if (!krcp->nr_bkv_objs) 3088 return NULL; 3089 3090 krcp->nr_bkv_objs--; 3091 return (struct kvfree_rcu_bulk_data *) 3092 llist_del_first(&krcp->bkvcache); 3093 } 3094 3095 static inline bool 3096 put_cached_bnode(struct kfree_rcu_cpu *krcp, 3097 struct kvfree_rcu_bulk_data *bnode) 3098 { 3099 // Check the limit. 3100 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) 3101 return false; 3102 3103 llist_add((struct llist_node *) bnode, &krcp->bkvcache); 3104 krcp->nr_bkv_objs++; 3105 return true; 3106 3107 } 3108 3109 /* 3110 * This function is invoked in workqueue context after a grace period. 3111 * It frees all the objects queued on ->bhead_free or ->head_free. 3112 */ 3113 static void kfree_rcu_work(struct work_struct *work) 3114 { 3115 unsigned long flags; 3116 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext; 3117 struct rcu_head *head, *next; 3118 struct kfree_rcu_cpu *krcp; 3119 struct kfree_rcu_cpu_work *krwp; 3120 int i, j; 3121 3122 krwp = container_of(to_rcu_work(work), 3123 struct kfree_rcu_cpu_work, rcu_work); 3124 krcp = krwp->krcp; 3125 3126 raw_spin_lock_irqsave(&krcp->lock, flags); 3127 // Channels 1 and 2. 3128 for (i = 0; i < FREE_N_CHANNELS; i++) { 3129 bkvhead[i] = krwp->bkvhead_free[i]; 3130 krwp->bkvhead_free[i] = NULL; 3131 } 3132 3133 // Channel 3. 3134 head = krwp->head_free; 3135 krwp->head_free = NULL; 3136 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3137 3138 // Handle two first channels. 3139 for (i = 0; i < FREE_N_CHANNELS; i++) { 3140 for (; bkvhead[i]; bkvhead[i] = bnext) { 3141 bnext = bkvhead[i]->next; 3142 debug_rcu_bhead_unqueue(bkvhead[i]); 3143 3144 rcu_lock_acquire(&rcu_callback_map); 3145 if (i == 0) { // kmalloc() / kfree(). 3146 trace_rcu_invoke_kfree_bulk_callback( 3147 rcu_state.name, bkvhead[i]->nr_records, 3148 bkvhead[i]->records); 3149 3150 kfree_bulk(bkvhead[i]->nr_records, 3151 bkvhead[i]->records); 3152 } else { // vmalloc() / vfree(). 
*/ 3153 for (j = 0; j < bkvhead[i]->nr_records; j++) { 3154 trace_rcu_invoke_kvfree_callback( 3155 rcu_state.name, 3156 bkvhead[i]->records[j], 0); 3157 3158 vfree(bkvhead[i]->records[j]); 3159 } 3160 } 3161 rcu_lock_release(&rcu_callback_map); 3162 3163 krcp = krc_this_cpu_lock(&flags); 3164 if (put_cached_bnode(krcp, bkvhead[i])) 3165 bkvhead[i] = NULL; 3166 krc_this_cpu_unlock(krcp, flags); 3167 3168 if (bkvhead[i]) 3169 free_page((unsigned long) bkvhead[i]); 3170 3171 cond_resched_tasks_rcu_qs(); 3172 } 3173 } 3174 3175 /* 3176 * Emergency case only. It can happen under low-memory 3177 * conditions when an allocation fails, so the "bulk" 3178 * path temporarily cannot be used. 3179 */ 3180 for (; head; head = next) { 3181 unsigned long offset = (unsigned long)head->func; 3182 void *ptr = (void *)head - offset; 3183 3184 next = head->next; 3185 debug_rcu_head_unqueue((struct rcu_head *)ptr); 3186 rcu_lock_acquire(&rcu_callback_map); 3187 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); 3188 3189 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) 3190 kvfree(ptr); 3191 3192 rcu_lock_release(&rcu_callback_map); 3193 cond_resched_tasks_rcu_qs(); 3194 } 3195 } 3196 3197 /* 3198 * Schedule the kfree batch RCU work to run in workqueue context after a GP. 3199 * 3200 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES 3201 * timeout has been reached. 3202 */ 3203 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp) 3204 { 3205 struct kfree_rcu_cpu_work *krwp; 3206 bool repeat = false; 3207 int i, j; 3208 3209 lockdep_assert_held(&krcp->lock); 3210 3211 for (i = 0; i < KFREE_N_BATCHES; i++) { 3212 krwp = &(krcp->krw_arr[i]); 3213 3214 /* 3215 * Try to detach the bkvhead or head list and attach it to the 3216 * corresponding free channel, if one is available. A previous 3217 * RCU batch may still be in progress, in which case another one 3218 * cannot be queued immediately, so return false to tell the 3219 * caller to retry later. 3220 */ 3221 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) || 3222 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) || 3223 (krcp->head && !krwp->head_free)) { 3224 // Channel 1 corresponds to SLAB ptrs. 3225 // Channel 2 corresponds to vmalloc ptrs. 3226 for (j = 0; j < FREE_N_CHANNELS; j++) { 3227 if (!krwp->bkvhead_free[j]) { 3228 krwp->bkvhead_free[j] = krcp->bkvhead[j]; 3229 krcp->bkvhead[j] = NULL; 3230 } 3231 } 3232 3233 // Channel 3 corresponds to emergency path. 3234 if (!krwp->head_free) { 3235 krwp->head_free = krcp->head; 3236 krcp->head = NULL; 3237 } 3238 3239 WRITE_ONCE(krcp->count, 0); 3240 3241 /* 3242 * There is one work item per batch, and each batch can handle 3243 * up to three "free channels". The work may already be in 3244 * the pending state by the time the channels have been 3245 * detached, if they were detached one after 3246 * the other. 3247 */ 3248 queue_rcu_work(system_wq, &krwp->rcu_work); 3249 } 3250 3251 // Repeat if any corresponding "free" channel is still busy. 3252 if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) 3253 repeat = true; 3254 } 3255 3256 return !repeat; 3257 } 3258 3259 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp, 3260 unsigned long flags) 3261 { 3262 // Attempt to start a new batch. 3263 krcp->monitor_todo = false; 3264 if (queue_kfree_rcu_work(krcp)) { 3265 // Success! Our job is done here. 3266 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3267 return; 3268 } 3269 3270 // Previous RCU batch still in progress, try again later.
3271 krcp->monitor_todo = true;
3272 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3273 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3274 }
3275
3276 /*
3277 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3278 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3279 */
3280 static void kfree_rcu_monitor(struct work_struct *work)
3281 {
3282 unsigned long flags;
3283 struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3284 monitor_work.work);
3285
3286 raw_spin_lock_irqsave(&krcp->lock, flags);
3287 if (krcp->monitor_todo)
3288 kfree_rcu_drain_unlock(krcp, flags);
3289 else
3290 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3291 }
3292
3293 static inline bool
3294 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3295 {
3296 struct kvfree_rcu_bulk_data *bnode;
3297 int idx;
3298
3299 if (unlikely(!krcp->initialized))
3300 return false;
3301
3302 lockdep_assert_held(&krcp->lock);
3303 idx = !!is_vmalloc_addr(ptr);
3304
3305 /* Check if a new block is required. */
3306 if (!krcp->bkvhead[idx] ||
3307 krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3308 bnode = get_cached_bnode(krcp);
3309 if (!bnode) {
3310 /*
3311 * To keep this path working on raw non-preemptible
3312 * sections, prevent the optional entry into the
3313 * allocator as it uses sleeping locks. In fact, even
3314 * if the caller of kfree_rcu() is preemptible, this
3315 * path still is not, as krcp->lock is a raw spinlock.
3316 * With additional page pre-allocation in the works,
3317 * hitting this return is going to be much less likely.
3318 */
3319 if (IS_ENABLED(CONFIG_PREEMPT_RT))
3320 return false;
3321
3322 /*
3323 * NOTE: For the single-argument form of kvfree_rcu()
3324 * we could drop the lock and allocate the page in
3325 * sleepable context. That would allow the array path
3326 * to be maintained on CONFIG_PREEMPT_RT as well when
3327 * no cached pages are available.
3328 */
3329 bnode = (struct kvfree_rcu_bulk_data *)
3330 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3331 }
3332
3333 /* Switch to emergency path. */
3334 if (unlikely(!bnode))
3335 return false;
3336
3337 /* Initialize the new block. */
3338 bnode->nr_records = 0;
3339 bnode->next = krcp->bkvhead[idx];
3340
3341 /* Attach it to the head. */
3342 krcp->bkvhead[idx] = bnode;
3343 }
3344
3345 /* Finally insert. */
3346 krcp->bkvhead[idx]->records
3347 [krcp->bkvhead[idx]->nr_records++] = ptr;
3348
3349 return true;
3350 }
3351
3352 /*
3353 * Queue a request for lazy invocation of the appropriate free routine
3354 * after a grace period. Please note that three paths are maintained:
3355 * the two main ones use the array-of-pointers interface, and the third
3356 * is an emergency path that is used only when the main path temporarily
3357 * cannot be used due to memory pressure.
3358 *
3359 * Each kvfree_call_rcu() request is added to a batch. The batch is drained
3360 * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3361 * in workqueue context. This allows requests to be batched together so as to
3362 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
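 *
 * A minimal usage sketch (illustrative only; "struct foo", "fp", and
 * "release_foo()" are hypothetical names, not part of this file):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void release_foo(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rh);	// Two-argument form; safe from atomic context.
 *	}
 *
 * The headless (single-argument) variant handled below needs no embedded
 * rcu_head but, as noted in kvfree_call_rcu(), may only be used from
 * sleepable context.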
3363 */
3364 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3365 {
3366 unsigned long flags;
3367 struct kfree_rcu_cpu *krcp;
3368 bool success;
3369 void *ptr;
3370
3371 if (head) {
3372 ptr = (void *) head - (unsigned long) func;
3373 } else {
3374 /*
3375 * Please note that the headless variant has a limitation,
3376 * which is why there is a clear rule for such objects: it
3377 * may be used from might_sleep() context only. For all
3378 * other contexts, please embed an rcu_head in your data
3379 * instead.
3380 */
3381 might_sleep();
3382 ptr = (unsigned long *) func;
3383 }
3384
3385 krcp = krc_this_cpu_lock(&flags);
3386
3387 // Queue the object but don't yet schedule the batch.
3388 if (debug_rcu_head_queue(ptr)) {
3389 // Probable double kfree_rcu(), just leak.
3390 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3391 __func__, head);
3392
3393 // Mark as success and leave.
3394 success = true;
3395 goto unlock_return;
3396 }
3397
3398 /*
3399 * Under high memory pressure GFP_NOWAIT can fail, in
3400 * which case the emergency path is used instead.
3401 */
3402 success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3403 if (!success) {
3404 if (head == NULL)
3405 // Inline if kvfree_rcu(one_arg) call.
3406 goto unlock_return;
3407
3408 head->func = func;
3409 head->next = krcp->head;
3410 krcp->head = head;
3411 success = true;
3412 }
3413
3414 WRITE_ONCE(krcp->count, krcp->count + 1);
3415
3416 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3417 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3418 !krcp->monitor_todo) {
3419 krcp->monitor_todo = true;
3420 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3421 }
3422
3423 unlock_return:
3424 krc_this_cpu_unlock(krcp, flags);
3425
3426 /*
3427 * Inline kvfree() after synchronize_rcu(). This can be done
3428 * only from might_sleep() context, which lets the current
3429 * CPU pass through a quiescent state.
3430 */
3431 if (!success) {
3432 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3433 synchronize_rcu();
3434 kvfree(ptr);
3435 }
3436 }
3437 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
3438
3439 static unsigned long
3440 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3441 {
3442 int cpu;
3443 unsigned long count = 0;
3444
3445 /* Snapshot count of all CPUs */
3446 for_each_online_cpu(cpu) {
3447 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3448
3449 count += READ_ONCE(krcp->count);
3450 }
3451
3452 return count;
3453 }
3454
3455 static unsigned long
3456 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3457 {
3458 int cpu, freed = 0;
3459 unsigned long flags;
3460
3461 for_each_online_cpu(cpu) {
3462 int count;
3463 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3464
3465 count = krcp->count;
3466 raw_spin_lock_irqsave(&krcp->lock, flags);
3467 if (krcp->monitor_todo)
3468 kfree_rcu_drain_unlock(krcp, flags);
3469 else
3470 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3471
3472 sc->nr_to_scan -= count;
3473 freed += count;
3474
3475 if (sc->nr_to_scan <= 0)
3476 break;
3477 }
3478
3479 return freed == 0 ?
SHRINK_STOP : freed;
3480 }
3481
3482 static struct shrinker kfree_rcu_shrinker = {
3483 .count_objects = kfree_rcu_shrink_count,
3484 .scan_objects = kfree_rcu_shrink_scan,
3485 .batch = 0,
3486 .seeks = DEFAULT_SEEKS,
3487 };
3488
3489 void __init kfree_rcu_scheduler_running(void)
3490 {
3491 int cpu;
3492 unsigned long flags;
3493
3494 for_each_online_cpu(cpu) {
3495 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3496
3497 raw_spin_lock_irqsave(&krcp->lock, flags);
3498 if (!krcp->head || krcp->monitor_todo) {
3499 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3500 continue;
3501 }
3502 krcp->monitor_todo = true;
3503 schedule_delayed_work_on(cpu, &krcp->monitor_work,
3504 KFREE_DRAIN_JIFFIES);
3505 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3506 }
3507 }
3508
3509 /*
3510 * During early boot, any blocking grace-period wait automatically
3511 * implies a grace period. Later on, this is never the case for PREEMPTION.
3512 *
3513 * However, because a context switch is a grace period for !PREEMPTION, any
3514 * blocking grace-period wait automatically implies a grace period if
3515 * there is only one CPU online at any point in time during execution of
3516 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
3517 * occasionally incorrectly indicate that there are multiple CPUs online
3518 * when there was in fact only one the whole time, as this just adds some
3519 * overhead: RCU still operates correctly.
3520 */
3521 static int rcu_blocking_is_gp(void)
3522 {
3523 int ret;
3524
3525 if (IS_ENABLED(CONFIG_PREEMPTION))
3526 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3527 might_sleep(); /* Check for RCU read-side critical section. */
3528 preempt_disable();
3529 ret = num_online_cpus() <= 1;
3530 preempt_enable();
3531 return ret;
3532 }
3533
3534 /**
3535 * synchronize_rcu - wait until a grace period has elapsed.
3536 *
3537 * Control will return to the caller some time after a full grace
3538 * period has elapsed, in other words after all currently executing RCU
3539 * read-side critical sections have completed. Note, however, that
3540 * upon return from synchronize_rcu(), the caller might well be executing
3541 * concurrently with new RCU read-side critical sections that began while
3542 * synchronize_rcu() was waiting. RCU read-side critical sections are
3543 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3544 * In addition, regions of code across which interrupts, preemption, or
3545 * softirqs have been disabled also serve as RCU read-side critical
3546 * sections. This includes hardware interrupt handlers, softirq handlers,
3547 * and NMI handlers.
3548 *
3549 * Note that this guarantee implies further memory-ordering guarantees.
3550 * On systems with more than one CPU, when synchronize_rcu() returns,
3551 * each CPU is guaranteed to have executed a full memory barrier since
3552 * the end of its last RCU read-side critical section whose beginning
3553 * preceded the call to synchronize_rcu(). In addition, each CPU having
3554 * an RCU read-side critical section that extends beyond the return from
3555 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3556 * after the beginning of synchronize_rcu() and before the beginning of
3557 * that RCU read-side critical section. Note that these guarantees include
3558 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3559 * that are executing in the kernel.
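 *
 * A typical update-side usage sketch (illustrative only; "gp", "gp_lock",
 * and the structure they refer to are hypothetical):
 *
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, NULL);	// Unpublish the old element.
 *	synchronize_rcu();		// Wait for pre-existing readers.
 *	kfree(p);			// No reader can still hold a reference.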
3560 * 3561 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 3562 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3563 * to have executed a full memory barrier during the execution of 3564 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 3565 * again only if the system has more than one CPU). 3566 */ 3567 void synchronize_rcu(void) 3568 { 3569 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3570 lock_is_held(&rcu_lock_map) || 3571 lock_is_held(&rcu_sched_lock_map), 3572 "Illegal synchronize_rcu() in RCU read-side critical section"); 3573 if (rcu_blocking_is_gp()) 3574 return; 3575 if (rcu_gp_is_expedited()) 3576 synchronize_rcu_expedited(); 3577 else 3578 wait_rcu_gp(call_rcu); 3579 } 3580 EXPORT_SYMBOL_GPL(synchronize_rcu); 3581 3582 /** 3583 * get_state_synchronize_rcu - Snapshot current RCU state 3584 * 3585 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3586 * to determine whether or not a full grace period has elapsed in the 3587 * meantime. 3588 */ 3589 unsigned long get_state_synchronize_rcu(void) 3590 { 3591 /* 3592 * Any prior manipulation of RCU-protected data must happen 3593 * before the load from ->gp_seq. 3594 */ 3595 smp_mb(); /* ^^^ */ 3596 return rcu_seq_snap(&rcu_state.gp_seq); 3597 } 3598 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3599 3600 /** 3601 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3602 * 3603 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 3604 * 3605 * If a full RCU grace period has elapsed since the earlier call to 3606 * get_state_synchronize_rcu(), just return. Otherwise, invoke 3607 * synchronize_rcu() to wait for a full grace period. 3608 * 3609 * Yes, this function does not take counter wrap into account. But 3610 * counter wrap is harmless. If the counter wraps, we have waited for 3611 * more than 2 billion grace periods (and way more on a 64-bit system!), 3612 * so waiting for one additional grace period should be just fine. 3613 */ 3614 void cond_synchronize_rcu(unsigned long oldstate) 3615 { 3616 if (!rcu_seq_done(&rcu_state.gp_seq, oldstate)) 3617 synchronize_rcu(); 3618 else 3619 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3620 } 3621 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3622 3623 /* 3624 * Check to see if there is any immediate RCU-related work to be done by 3625 * the current CPU, returning 1 if so and zero otherwise. The checks are 3626 * in order of increasing expense: checks that can be carried out against 3627 * CPU-local state are performed first. However, we must check for CPU 3628 * stalls first, else we might not get a chance. 3629 */ 3630 static int rcu_pending(int user) 3631 { 3632 bool gp_in_progress; 3633 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 3634 struct rcu_node *rnp = rdp->mynode; 3635 3636 /* Check for CPU stalls, if enabled. */ 3637 check_cpu_stall(rdp); 3638 3639 /* Does this CPU need a deferred NOCB wakeup? */ 3640 if (rcu_nocb_need_deferred_wakeup(rdp)) 3641 return 1; 3642 3643 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 3644 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu()) 3645 return 0; 3646 3647 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3648 gp_in_progress = rcu_gp_in_progress(); 3649 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 3650 return 1; 3651 3652 /* Does this CPU have callbacks ready to invoke? 
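 * That is, callbacks whose grace period has already ended and that are
 * now just waiting to be invoked.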
*/ 3653 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 3654 return 1; 3655 3656 /* Has RCU gone idle with this CPU needing another grace period? */ 3657 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 3658 (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) || 3659 !rcu_segcblist_is_offloaded(&rdp->cblist)) && 3660 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3661 return 1; 3662 3663 /* Have RCU grace period completed or started? */ 3664 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 3665 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3666 return 1; 3667 3668 /* nothing to do */ 3669 return 0; 3670 } 3671 3672 /* 3673 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3674 * the compiler is expected to optimize this away. 3675 */ 3676 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 3677 { 3678 trace_rcu_barrier(rcu_state.name, s, cpu, 3679 atomic_read(&rcu_state.barrier_cpu_count), done); 3680 } 3681 3682 /* 3683 * RCU callback function for rcu_barrier(). If we are last, wake 3684 * up the task executing rcu_barrier(). 3685 * 3686 * Note that the value of rcu_state.barrier_sequence must be captured 3687 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 3688 * other CPUs might count the value down to zero before this CPU gets 3689 * around to invoking rcu_barrier_trace(), which might result in bogus 3690 * data from the next instance of rcu_barrier(). 3691 */ 3692 static void rcu_barrier_callback(struct rcu_head *rhp) 3693 { 3694 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 3695 3696 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 3697 rcu_barrier_trace(TPS("LastCB"), -1, s); 3698 complete(&rcu_state.barrier_completion); 3699 } else { 3700 rcu_barrier_trace(TPS("CB"), -1, s); 3701 } 3702 } 3703 3704 /* 3705 * Called with preemption disabled, and from cross-cpu IRQ context. 3706 */ 3707 static void rcu_barrier_func(void *cpu_in) 3708 { 3709 uintptr_t cpu = (uintptr_t)cpu_in; 3710 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3711 3712 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 3713 rdp->barrier_head.func = rcu_barrier_callback; 3714 debug_rcu_head_queue(&rdp->barrier_head); 3715 rcu_nocb_lock(rdp); 3716 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); 3717 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 3718 atomic_inc(&rcu_state.barrier_cpu_count); 3719 } else { 3720 debug_rcu_head_unqueue(&rdp->barrier_head); 3721 rcu_barrier_trace(TPS("IRQNQ"), -1, 3722 rcu_state.barrier_sequence); 3723 } 3724 rcu_nocb_unlock(rdp); 3725 } 3726 3727 /** 3728 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 3729 * 3730 * Note that this primitive does not necessarily wait for an RCU grace period 3731 * to complete. For example, if there are no RCU callbacks queued anywhere 3732 * in the system, then rcu_barrier() is within its rights to return 3733 * immediately, without waiting for anything, much less an RCU grace period. 3734 */ 3735 void rcu_barrier(void) 3736 { 3737 uintptr_t cpu; 3738 struct rcu_data *rdp; 3739 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3740 3741 rcu_barrier_trace(TPS("Begin"), -1, s); 3742 3743 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3744 mutex_lock(&rcu_state.barrier_mutex); 3745 3746 /* Did someone else do our work for us? 
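 * The snapshot "s" was taken with rcu_seq_snap() before barrier_mutex was
 * acquired, so if rcu_seq_done() says that snapshot has already been
 * reached, a full rcu_barrier() has completed in the meantime and there
 * is nothing more to do.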
*/
3747 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3748 rcu_barrier_trace(TPS("EarlyExit"), -1,
3749 rcu_state.barrier_sequence);
3750 smp_mb(); /* caller's subsequent code after above check. */
3751 mutex_unlock(&rcu_state.barrier_mutex);
3752 return;
3753 }
3754
3755 /* Mark the start of the barrier operation. */
3756 rcu_seq_start(&rcu_state.barrier_sequence);
3757 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3758
3759 /*
3760 * Initialize the count to two rather than to zero in order
3761 * to avoid a too-soon return to zero in case of an immediate
3762 * invocation of the just-enqueued callback (or preemption of
3763 * this task). Exclude CPU-hotplug operations to ensure that no
3764 * offline non-offloaded CPU has callbacks queued.
3765 */
3766 init_completion(&rcu_state.barrier_completion);
3767 atomic_set(&rcu_state.barrier_cpu_count, 2);
3768 get_online_cpus();
3769
3770 /*
3771 * Force each CPU with callbacks to register a new callback.
3772 * When that callback is invoked, we will know that all of the
3773 * corresponding CPU's preceding callbacks have been invoked.
3774 */
3775 for_each_possible_cpu(cpu) {
3776 rdp = per_cpu_ptr(&rcu_data, cpu);
3777 if (cpu_is_offline(cpu) &&
3778 !rcu_segcblist_is_offloaded(&rdp->cblist))
3779 continue;
3780 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3781 rcu_barrier_trace(TPS("OnlineQ"), cpu,
3782 rcu_state.barrier_sequence);
3783 smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3784 } else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3785 cpu_is_offline(cpu)) {
3786 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3787 rcu_state.barrier_sequence);
3788 local_irq_disable();
3789 rcu_barrier_func((void *)cpu);
3790 local_irq_enable();
3791 } else if (cpu_is_offline(cpu)) {
3792 rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3793 rcu_state.barrier_sequence);
3794 } else {
3795 rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3796 rcu_state.barrier_sequence);
3797 }
3798 }
3799 put_online_cpus();
3800
3801 /*
3802 * Now that an rcu_barrier_callback() callback has been enqueued on each
3803 * CPU with callbacks, and each has been counted, remove the initial count.
3804 */
3805 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3806 complete(&rcu_state.barrier_completion);
3807
3808 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3809 wait_for_completion(&rcu_state.barrier_completion);
3810
3811 /* Mark the end of the barrier operation. */
3812 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3813 rcu_seq_end(&rcu_state.barrier_sequence);
3814
3815 /* Other rcu_barrier() invocations can now safely proceed. */
3816 mutex_unlock(&rcu_state.barrier_mutex);
3817 }
3818 EXPORT_SYMBOL_GPL(rcu_barrier);
3819
3820 /*
3821 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
3822 * first CPU in a given leaf rcu_node structure coming online. The caller
3823 * must hold the corresponding leaf rcu_node ->lock with interrupts
3824 * disabled.
3825 */
3826 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3827 {
3828 long mask;
3829 long oldmask;
3830 struct rcu_node *rnp = rnp_leaf;
3831
3832 raw_lockdep_assert_held_rcu_node(rnp_leaf);
3833 WARN_ON_ONCE(rnp->wait_blkd_tasks);
3834 for (;;) {
3835 mask = rnp->grpmask;
3836 rnp = rnp->parent;
3837 if (rnp == NULL)
3838 return;
3839 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3840 oldmask = rnp->qsmaskinit;
3841 rnp->qsmaskinit |= mask;
3842 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled.
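 * If the parent's ->qsmaskinit was already nonzero (oldmask), the upper
 * levels of the tree were initialized by an earlier CPU-online event, so
 * the upward propagation can stop here.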
*/ 3843 if (oldmask) 3844 return; 3845 } 3846 } 3847 3848 /* 3849 * Do boot-time initialization of a CPU's per-CPU RCU data. 3850 */ 3851 static void __init 3852 rcu_boot_init_percpu_data(int cpu) 3853 { 3854 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3855 3856 /* Set up local state, ensuring consistent view of global state. */ 3857 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); 3858 WARN_ON_ONCE(rdp->dynticks_nesting != 1); 3859 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); 3860 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; 3861 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; 3862 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; 3863 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; 3864 rdp->cpu = cpu; 3865 rcu_boot_init_nocb_percpu_data(rdp); 3866 } 3867 3868 /* 3869 * Invoked early in the CPU-online process, when pretty much all services 3870 * are available. The incoming CPU is not present. 3871 * 3872 * Initializes a CPU's per-CPU RCU data. Note that only one online or 3873 * offline event can be happening at a given time. Note also that we can 3874 * accept some slop in the rsp->gp_seq access due to the fact that this 3875 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 3876 * And any offloaded callbacks are being numbered elsewhere. 3877 */ 3878 int rcutree_prepare_cpu(unsigned int cpu) 3879 { 3880 unsigned long flags; 3881 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3882 struct rcu_node *rnp = rcu_get_root(); 3883 3884 /* Set up local state, ensuring consistent view of global state. */ 3885 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3886 rdp->qlen_last_fqs_check = 0; 3887 rdp->n_force_qs_snap = rcu_state.n_force_qs; 3888 rdp->blimit = blimit; 3889 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ 3890 !rcu_segcblist_is_offloaded(&rdp->cblist)) 3891 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 3892 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ 3893 rcu_dynticks_eqs_online(); 3894 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 3895 3896 /* 3897 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 3898 * propagation up the rcu_node tree will happen at the beginning 3899 * of the next grace period. 3900 */ 3901 rnp = rdp->mynode; 3902 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 3903 rdp->beenonline = true; /* We have now been online. */ 3904 rdp->gp_seq = READ_ONCE(rnp->gp_seq); 3905 rdp->gp_seq_needed = rdp->gp_seq; 3906 rdp->cpu_no_qs.b.norm = true; 3907 rdp->core_needs_qs = false; 3908 rdp->rcu_iw_pending = false; 3909 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 3910 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 3911 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3912 rcu_prepare_kthreads(cpu); 3913 rcu_spawn_cpu_nocb_kthread(cpu); 3914 3915 return 0; 3916 } 3917 3918 /* 3919 * Update RCU priority boot kthread affinity for CPU-hotplug changes. 3920 */ 3921 static void rcutree_affinity_setting(unsigned int cpu, int outgoing) 3922 { 3923 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3924 3925 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); 3926 } 3927 3928 /* 3929 * Near the end of the CPU-online process. Pretty much all services 3930 * enabled, and the CPU is now very much alive. 
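 * Accordingly, the incoming CPU's bit is set in its leaf rcu_node ->ffmask
 * (marking it as fully functional), expedited-GP online cleanup is done,
 * boost-kthread affinity is updated, and the TICK_DEP_BIT_RCU dependency
 * is cleared so that nohz_full CPUs may once again disable the tick.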
3931 */ 3932 int rcutree_online_cpu(unsigned int cpu) 3933 { 3934 unsigned long flags; 3935 struct rcu_data *rdp; 3936 struct rcu_node *rnp; 3937 3938 rdp = per_cpu_ptr(&rcu_data, cpu); 3939 rnp = rdp->mynode; 3940 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3941 rnp->ffmask |= rdp->grpmask; 3942 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3943 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 3944 return 0; /* Too early in boot for scheduler work. */ 3945 sync_sched_exp_online_cleanup(cpu); 3946 rcutree_affinity_setting(cpu, -1); 3947 3948 // Stop-machine done, so allow nohz_full to disable tick. 3949 tick_dep_clear(TICK_DEP_BIT_RCU); 3950 return 0; 3951 } 3952 3953 /* 3954 * Near the beginning of the process. The CPU is still very much alive 3955 * with pretty much all services enabled. 3956 */ 3957 int rcutree_offline_cpu(unsigned int cpu) 3958 { 3959 unsigned long flags; 3960 struct rcu_data *rdp; 3961 struct rcu_node *rnp; 3962 3963 rdp = per_cpu_ptr(&rcu_data, cpu); 3964 rnp = rdp->mynode; 3965 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3966 rnp->ffmask &= ~rdp->grpmask; 3967 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3968 3969 rcutree_affinity_setting(cpu, cpu); 3970 3971 // nohz_full CPUs need the tick for stop-machine to work quickly 3972 tick_dep_set(TICK_DEP_BIT_RCU); 3973 return 0; 3974 } 3975 3976 static DEFINE_PER_CPU(int, rcu_cpu_started); 3977 3978 /* 3979 * Mark the specified CPU as being online so that subsequent grace periods 3980 * (both expedited and normal) will wait on it. Note that this means that 3981 * incoming CPUs are not allowed to use RCU read-side critical sections 3982 * until this function is called. Failing to observe this restriction 3983 * will result in lockdep splats. 3984 * 3985 * Note that this function is special in that it is invoked directly 3986 * from the incoming CPU rather than from the cpuhp_step mechanism. 3987 * This is because this function must be invoked at a precise location. 3988 */ 3989 void rcu_cpu_starting(unsigned int cpu) 3990 { 3991 unsigned long flags; 3992 unsigned long mask; 3993 struct rcu_data *rdp; 3994 struct rcu_node *rnp; 3995 bool newcpu; 3996 3997 if (per_cpu(rcu_cpu_started, cpu)) 3998 return; 3999 4000 per_cpu(rcu_cpu_started, cpu) = 1; 4001 4002 rdp = per_cpu_ptr(&rcu_data, cpu); 4003 rnp = rdp->mynode; 4004 mask = rdp->grpmask; 4005 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4006 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); 4007 newcpu = !(rnp->expmaskinitnext & mask); 4008 rnp->expmaskinitnext |= mask; 4009 /* Allow lockless access for expedited grace periods. */ 4010 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */ 4011 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus); 4012 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ 4013 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4014 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); 4015 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ 4016 rcu_disable_urgency_upon_qs(rdp); 4017 /* Report QS -after- changing ->qsmaskinitnext! */ 4018 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4019 } else { 4020 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4021 } 4022 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 4023 } 4024 4025 #ifdef CONFIG_HOTPLUG_CPU 4026 /* 4027 * The outgoing function has no further need of RCU, so remove it from 4028 * the rcu_node tree's ->qsmaskinitnext bit masks. 
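 * This function also reports a quiescent state for any expedited grace
 * period that is still waiting on the outgoing CPU and records the
 * grace-period state at offline time in that CPU's rcu_data structure.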
4029 * 4030 * Note that this function is special in that it is invoked directly 4031 * from the outgoing CPU rather than from the cpuhp_step mechanism. 4032 * This is because this function must be invoked at a precise location. 4033 */ 4034 void rcu_report_dead(unsigned int cpu) 4035 { 4036 unsigned long flags; 4037 unsigned long mask; 4038 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4039 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 4040 4041 /* QS for any half-done expedited grace period. */ 4042 preempt_disable(); 4043 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); 4044 preempt_enable(); 4045 rcu_preempt_deferred_qs(current); 4046 4047 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 4048 mask = rdp->grpmask; 4049 raw_spin_lock(&rcu_state.ofl_lock); 4050 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 4051 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4052 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); 4053 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 4054 /* Report quiescent state -before- changing ->qsmaskinitnext! */ 4055 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4056 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4057 } 4058 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); 4059 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4060 raw_spin_unlock(&rcu_state.ofl_lock); 4061 4062 per_cpu(rcu_cpu_started, cpu) = 0; 4063 } 4064 4065 /* 4066 * The outgoing CPU has just passed through the dying-idle state, and we 4067 * are being invoked from the CPU that was IPIed to continue the offline 4068 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 4069 */ 4070 void rcutree_migrate_callbacks(int cpu) 4071 { 4072 unsigned long flags; 4073 struct rcu_data *my_rdp; 4074 struct rcu_node *my_rnp; 4075 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4076 bool needwake; 4077 4078 if (rcu_segcblist_is_offloaded(&rdp->cblist) || 4079 rcu_segcblist_empty(&rdp->cblist)) 4080 return; /* No callbacks to migrate. */ 4081 4082 local_irq_save(flags); 4083 my_rdp = this_cpu_ptr(&rcu_data); 4084 my_rnp = my_rdp->mynode; 4085 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 4086 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies)); 4087 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 4088 /* Leverage recent GPs and set GP for new callbacks. */ 4089 needwake = rcu_advance_cbs(my_rnp, rdp) || 4090 rcu_advance_cbs(my_rnp, my_rdp); 4091 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 4092 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 4093 rcu_segcblist_disable(&rdp->cblist); 4094 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != 4095 !rcu_segcblist_n_cbs(&my_rdp->cblist)); 4096 if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) { 4097 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 4098 __call_rcu_nocb_wake(my_rdp, true, flags); 4099 } else { 4100 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. 
*/ 4101 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags); 4102 } 4103 if (needwake) 4104 rcu_gp_kthread_wake(); 4105 lockdep_assert_irqs_enabled(); 4106 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 4107 !rcu_segcblist_empty(&rdp->cblist), 4108 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 4109 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 4110 rcu_segcblist_first_cb(&rdp->cblist)); 4111 } 4112 #endif 4113 4114 /* 4115 * On non-huge systems, use expedited RCU grace periods to make suspend 4116 * and hibernation run faster. 4117 */ 4118 static int rcu_pm_notify(struct notifier_block *self, 4119 unsigned long action, void *hcpu) 4120 { 4121 switch (action) { 4122 case PM_HIBERNATION_PREPARE: 4123 case PM_SUSPEND_PREPARE: 4124 rcu_expedite_gp(); 4125 break; 4126 case PM_POST_HIBERNATION: 4127 case PM_POST_SUSPEND: 4128 rcu_unexpedite_gp(); 4129 break; 4130 default: 4131 break; 4132 } 4133 return NOTIFY_OK; 4134 } 4135 4136 /* 4137 * Spawn the kthreads that handle RCU's grace periods. 4138 */ 4139 static int __init rcu_spawn_gp_kthread(void) 4140 { 4141 unsigned long flags; 4142 int kthread_prio_in = kthread_prio; 4143 struct rcu_node *rnp; 4144 struct sched_param sp; 4145 struct task_struct *t; 4146 4147 /* Force priority into range. */ 4148 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2 4149 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) 4150 kthread_prio = 2; 4151 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 4152 kthread_prio = 1; 4153 else if (kthread_prio < 0) 4154 kthread_prio = 0; 4155 else if (kthread_prio > 99) 4156 kthread_prio = 99; 4157 4158 if (kthread_prio != kthread_prio_in) 4159 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", 4160 kthread_prio, kthread_prio_in); 4161 4162 rcu_scheduler_fully_active = 1; 4163 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 4164 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 4165 return 0; 4166 if (kthread_prio) { 4167 sp.sched_priority = kthread_prio; 4168 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 4169 } 4170 rnp = rcu_get_root(); 4171 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4172 WRITE_ONCE(rcu_state.gp_activity, jiffies); 4173 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 4174 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread. 4175 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ 4176 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4177 wake_up_process(t); 4178 rcu_spawn_nocb_kthreads(); 4179 rcu_spawn_boost_kthreads(); 4180 return 0; 4181 } 4182 early_initcall(rcu_spawn_gp_kthread); 4183 4184 /* 4185 * This function is invoked towards the end of the scheduler's 4186 * initialization process. Before this is called, the idle task might 4187 * contain synchronous grace-period primitives (during which time, this idle 4188 * task is booting the system, and such primitives are no-ops). After this 4189 * function is called, any synchronous grace-period primitives are run as 4190 * expedited, with the requesting task driving the grace period forward. 4191 * A later core_initcall() rcu_set_runtime_mode() will switch to full 4192 * runtime RCU functionality. 
4193 */ 4194 void rcu_scheduler_starting(void) 4195 { 4196 WARN_ON(num_online_cpus() != 1); 4197 WARN_ON(nr_context_switches() > 0); 4198 rcu_test_sync_prims(); 4199 rcu_scheduler_active = RCU_SCHEDULER_INIT; 4200 rcu_test_sync_prims(); 4201 } 4202 4203 /* 4204 * Helper function for rcu_init() that initializes the rcu_state structure. 4205 */ 4206 static void __init rcu_init_one(void) 4207 { 4208 static const char * const buf[] = RCU_NODE_NAME_INIT; 4209 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4210 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 4211 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 4212 4213 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 4214 int cpustride = 1; 4215 int i; 4216 int j; 4217 struct rcu_node *rnp; 4218 4219 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 4220 4221 /* Silence gcc 4.8 false positive about array index out of range. */ 4222 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 4223 panic("rcu_init_one: rcu_num_lvls out of range"); 4224 4225 /* Initialize the level-tracking arrays. */ 4226 4227 for (i = 1; i < rcu_num_lvls; i++) 4228 rcu_state.level[i] = 4229 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; 4230 rcu_init_levelspread(levelspread, num_rcu_lvl); 4231 4232 /* Initialize the elements themselves, starting from the leaves. */ 4233 4234 for (i = rcu_num_lvls - 1; i >= 0; i--) { 4235 cpustride *= levelspread[i]; 4236 rnp = rcu_state.level[i]; 4237 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { 4238 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); 4239 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), 4240 &rcu_node_class[i], buf[i]); 4241 raw_spin_lock_init(&rnp->fqslock); 4242 lockdep_set_class_and_name(&rnp->fqslock, 4243 &rcu_fqs_class[i], fqs[i]); 4244 rnp->gp_seq = rcu_state.gp_seq; 4245 rnp->gp_seq_needed = rcu_state.gp_seq; 4246 rnp->completedqs = rcu_state.gp_seq; 4247 rnp->qsmask = 0; 4248 rnp->qsmaskinit = 0; 4249 rnp->grplo = j * cpustride; 4250 rnp->grphi = (j + 1) * cpustride - 1; 4251 if (rnp->grphi >= nr_cpu_ids) 4252 rnp->grphi = nr_cpu_ids - 1; 4253 if (i == 0) { 4254 rnp->grpnum = 0; 4255 rnp->grpmask = 0; 4256 rnp->parent = NULL; 4257 } else { 4258 rnp->grpnum = j % levelspread[i - 1]; 4259 rnp->grpmask = BIT(rnp->grpnum); 4260 rnp->parent = rcu_state.level[i - 1] + 4261 j / levelspread[i - 1]; 4262 } 4263 rnp->level = i; 4264 INIT_LIST_HEAD(&rnp->blkd_tasks); 4265 rcu_init_one_nocb(rnp); 4266 init_waitqueue_head(&rnp->exp_wq[0]); 4267 init_waitqueue_head(&rnp->exp_wq[1]); 4268 init_waitqueue_head(&rnp->exp_wq[2]); 4269 init_waitqueue_head(&rnp->exp_wq[3]); 4270 spin_lock_init(&rnp->exp_lock); 4271 } 4272 } 4273 4274 init_swait_queue_head(&rcu_state.gp_wq); 4275 init_swait_queue_head(&rcu_state.expedited_wq); 4276 rnp = rcu_first_leaf_node(); 4277 for_each_possible_cpu(i) { 4278 while (i > rnp->grphi) 4279 rnp++; 4280 per_cpu_ptr(&rcu_data, i)->mynode = rnp; 4281 rcu_boot_init_percpu_data(i); 4282 } 4283 } 4284 4285 /* 4286 * Compute the rcu_node tree geometry from kernel parameters. This cannot 4287 * replace the definitions in tree.h because those are needed to size 4288 * the ->node array in the rcu_state structure. 4289 */ 4290 static void __init rcu_init_geometry(void) 4291 { 4292 ulong d; 4293 int i; 4294 int rcu_capacity[RCU_NUM_LVLS]; 4295 4296 /* 4297 * Initialize any unspecified boot parameters. 
4298 * The default values of jiffies_till_first_fqs and jiffies_till_next_fqs
4299 * are set to the RCU_JIFFIES_TILL_FORCE_QS value, which is a function of HZ,
4300 * plus one jiffy for each RCU_JIFFIES_FQS_DIV CPUs that might be on the
4301 * system (for example, 512 possible CPUs add 512 / RCU_JIFFIES_FQS_DIV jiffies).
4302 */
4303 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4304 if (jiffies_till_first_fqs == ULONG_MAX)
4305 jiffies_till_first_fqs = d;
4306 if (jiffies_till_next_fqs == ULONG_MAX)
4307 jiffies_till_next_fqs = d;
4308 adjust_jiffies_till_sched_qs();
4309
4310 /* If the compile-time values are accurate, just leave. */
4311 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4312 nr_cpu_ids == NR_CPUS)
4313 return;
4314 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4315 rcu_fanout_leaf, nr_cpu_ids);
4316
4317 /*
4318 * The boot-time rcu_fanout_leaf parameter must be at least two
4319 * and cannot exceed the number of bits in the rcu_node masks.
4320 * Complain and fall back to the compile-time values if this
4321 * limit is exceeded.
4322 */
4323 if (rcu_fanout_leaf < 2 ||
4324 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4325 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4326 WARN_ON(1);
4327 return;
4328 }
4329
4330 /*
4331 * Compute the number of nodes that can be handled by an rcu_node tree
4332 * with the given number of levels.
4333 */
4334 rcu_capacity[0] = rcu_fanout_leaf;
4335 for (i = 1; i < RCU_NUM_LVLS; i++)
4336 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4337
4338 /*
4339 * The tree must be able to accommodate the configured number of CPUs.
4340 * If this limit is exceeded, fall back to the compile-time values.
4341 */
4342 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4343 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4344 WARN_ON(1);
4345 return;
4346 }
4347
4348 /* Calculate the number of levels in the tree. */
4349 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4350 }
4351 rcu_num_lvls = i + 1;
4352
4353 /* Calculate the number of rcu_nodes at each level of the tree. */
4354 for (i = 0; i < rcu_num_lvls; i++) {
4355 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4356 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4357 }
4358
4359 /* Calculate the total number of rcu_node structures. */
4360 rcu_num_nodes = 0;
4361 for (i = 0; i < rcu_num_lvls; i++)
4362 rcu_num_nodes += num_rcu_lvl[i];
4363 }
4364
4365 /*
4366 * Dump out the structure of the rcu_node combining tree associated
4367 * with the rcu_state structure.
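 * Each node is printed as "grplo:grphi ^grpnum", with one line of output
 * per tree level. For example, a hypothetical two-level tree covering 32
 * CPUs with a leaf fanout of 16 would be dumped as:
 *
 *	0:31 ^0
 *	0:15 ^0 16:31 ^1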
4368 */ 4369 static void __init rcu_dump_rcu_node_tree(void) 4370 { 4371 int level = 0; 4372 struct rcu_node *rnp; 4373 4374 pr_info("rcu_node tree layout dump\n"); 4375 pr_info(" "); 4376 rcu_for_each_node_breadth_first(rnp) { 4377 if (rnp->level != level) { 4378 pr_cont("\n"); 4379 pr_info(" "); 4380 level = rnp->level; 4381 } 4382 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 4383 } 4384 pr_cont("\n"); 4385 } 4386 4387 struct workqueue_struct *rcu_gp_wq; 4388 struct workqueue_struct *rcu_par_gp_wq; 4389 4390 static void __init kfree_rcu_batch_init(void) 4391 { 4392 int cpu; 4393 int i; 4394 4395 for_each_possible_cpu(cpu) { 4396 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 4397 struct kvfree_rcu_bulk_data *bnode; 4398 4399 for (i = 0; i < KFREE_N_BATCHES; i++) { 4400 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); 4401 krcp->krw_arr[i].krcp = krcp; 4402 } 4403 4404 for (i = 0; i < rcu_min_cached_objs; i++) { 4405 bnode = (struct kvfree_rcu_bulk_data *) 4406 __get_free_page(GFP_NOWAIT | __GFP_NOWARN); 4407 4408 if (bnode) 4409 put_cached_bnode(krcp, bnode); 4410 else 4411 pr_err("Failed to preallocate for %d CPU!\n", cpu); 4412 } 4413 4414 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); 4415 krcp->initialized = true; 4416 } 4417 if (register_shrinker(&kfree_rcu_shrinker)) 4418 pr_err("Failed to register kfree_rcu() shrinker!\n"); 4419 } 4420 4421 void __init rcu_init(void) 4422 { 4423 int cpu; 4424 4425 rcu_early_boot_tests(); 4426 4427 kfree_rcu_batch_init(); 4428 rcu_bootup_announce(); 4429 rcu_init_geometry(); 4430 rcu_init_one(); 4431 if (dump_tree) 4432 rcu_dump_rcu_node_tree(); 4433 if (use_softirq) 4434 open_softirq(RCU_SOFTIRQ, rcu_core_si); 4435 4436 /* 4437 * We don't need protection against CPU-hotplug here because 4438 * this is called early in boot, before either interrupts 4439 * or the scheduler are operational. 4440 */ 4441 pm_notifier(rcu_pm_notify, 0); 4442 for_each_online_cpu(cpu) { 4443 rcutree_prepare_cpu(cpu); 4444 rcu_cpu_starting(cpu); 4445 rcutree_online_cpu(cpu); 4446 } 4447 4448 /* Create workqueue for expedited GPs and for Tree SRCU. */ 4449 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); 4450 WARN_ON(!rcu_gp_wq); 4451 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); 4452 WARN_ON(!rcu_par_gp_wq); 4453 srcu_init(); 4454 4455 /* Fill in default value for rcutree.qovld boot parameter. */ 4456 /* -After- the rcu_node ->lock fields are initialized! */ 4457 if (qovld < 0) 4458 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark; 4459 else 4460 qovld_calc = qovld; 4461 } 4462 4463 #include "tree_stall.h" 4464 #include "tree_exp.h" 4465 #include "tree_plugin.h" 4466