1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 4 * 5 * Copyright IBM Corporation, 2008 6 * 7 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 8 * Manfred Spraul <manfred@colorfullife.com> 9 * Paul E. McKenney <paulmck@linux.ibm.com> 10 * 11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> 12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 13 * 14 * For detailed explanation of Read-Copy Update mechanism see - 15 * Documentation/RCU 16 */ 17 18 #define pr_fmt(fmt) "rcu: " fmt 19 20 #include <linux/types.h> 21 #include <linux/kernel.h> 22 #include <linux/init.h> 23 #include <linux/spinlock.h> 24 #include <linux/smp.h> 25 #include <linux/rcupdate_wait.h> 26 #include <linux/interrupt.h> 27 #include <linux/sched.h> 28 #include <linux/sched/debug.h> 29 #include <linux/nmi.h> 30 #include <linux/atomic.h> 31 #include <linux/bitops.h> 32 #include <linux/export.h> 33 #include <linux/completion.h> 34 #include <linux/moduleparam.h> 35 #include <linux/panic.h> 36 #include <linux/panic_notifier.h> 37 #include <linux/percpu.h> 38 #include <linux/notifier.h> 39 #include <linux/cpu.h> 40 #include <linux/mutex.h> 41 #include <linux/time.h> 42 #include <linux/kernel_stat.h> 43 #include <linux/wait.h> 44 #include <linux/kthread.h> 45 #include <uapi/linux/sched/types.h> 46 #include <linux/prefetch.h> 47 #include <linux/delay.h> 48 #include <linux/random.h> 49 #include <linux/trace_events.h> 50 #include <linux/suspend.h> 51 #include <linux/ftrace.h> 52 #include <linux/tick.h> 53 #include <linux/sysrq.h> 54 #include <linux/kprobes.h> 55 #include <linux/gfp.h> 56 #include <linux/oom.h> 57 #include <linux/smpboot.h> 58 #include <linux/jiffies.h> 59 #include <linux/slab.h> 60 #include <linux/sched/isolation.h> 61 #include <linux/sched/clock.h> 62 #include <linux/vmalloc.h> 63 #include <linux/mm.h> 64 #include <linux/kasan.h> 65 #include <linux/context_tracking.h> 66 #include "../time/tick-internal.h" 67 68 #include "tree.h" 69 #include "rcu.h" 70 71 #ifdef MODULE_PARAM_PREFIX 72 #undef MODULE_PARAM_PREFIX 73 #endif 74 #define MODULE_PARAM_PREFIX "rcutree." 75 76 /* Data structures. */ 77 78 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { 79 .gpwrap = true, 80 #ifdef CONFIG_RCU_NOCB_CPU 81 .cblist.flags = SEGCBLIST_RCU_CORE, 82 #endif 83 }; 84 static struct rcu_state rcu_state = { 85 .level = { &rcu_state.node[0] }, 86 .gp_state = RCU_GP_IDLE, 87 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, 88 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), 89 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock), 90 .name = RCU_NAME, 91 .abbr = RCU_ABBR, 92 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), 93 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), 94 .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED, 95 }; 96 97 /* Dump rcu_node combining tree at boot to verify correct setup. */ 98 static bool dump_tree; 99 module_param(dump_tree, bool, 0444); 100 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */ 101 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT); 102 #ifndef CONFIG_PREEMPT_RT 103 module_param(use_softirq, bool, 0444); 104 #endif 105 /* Control rcu_node-tree auto-balancing at boot time. */ 106 static bool rcu_fanout_exact; 107 module_param(rcu_fanout_exact, bool, 0444); 108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. 
*/ 109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF; 110 module_param(rcu_fanout_leaf, int, 0444); 111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; 112 /* Number of rcu_nodes at specified level. */ 113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT; 114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ 115 116 /* 117 * The rcu_scheduler_active variable is initialized to the value 118 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the 119 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE, 120 * RCU can assume that there is but one task, allowing RCU to (for example) 121 * optimize synchronize_rcu() to a simple barrier(). When this variable 122 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required 123 * to detect real grace periods. This variable is also used to suppress 124 * boot-time false positives from lockdep-RCU error checking. Finally, it 125 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU 126 * is fully initialized, including all of its kthreads having been spawned. 127 */ 128 int rcu_scheduler_active __read_mostly; 129 EXPORT_SYMBOL_GPL(rcu_scheduler_active); 130 131 /* 132 * The rcu_scheduler_fully_active variable transitions from zero to one 133 * during the early_initcall() processing, which is after the scheduler 134 * is capable of creating new tasks. So RCU processing (for example, 135 * creating tasks for RCU priority boosting) must be delayed until after 136 * rcu_scheduler_fully_active transitions from zero to one. We also 137 * currently delay invocation of any RCU callbacks until after this point. 138 * 139 * It might later prove better for people registering RCU callbacks during 140 * early boot to take responsibility for these callbacks, but one step at 141 * a time. 142 */ 143 static int rcu_scheduler_fully_active __read_mostly; 144 145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 146 unsigned long gps, unsigned long flags); 147 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); 148 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); 149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 150 static void invoke_rcu_core(void); 151 static void rcu_report_exp_rdp(struct rcu_data *rdp); 152 static void sync_sched_exp_online_cleanup(int cpu); 153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp); 154 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp); 155 156 /* 157 * rcuc/rcub/rcuop kthread realtime priority. The "rcuop" 158 * real-time priority(enabling/disabling) is controlled by 159 * the extra CONFIG_RCU_NOCB_CPU_CB_BOOST configuration. 160 */ 161 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0; 162 module_param(kthread_prio, int, 0444); 163 164 /* Delay in jiffies for grace-period initialization delays, debug only. */ 165 166 static int gp_preinit_delay; 167 module_param(gp_preinit_delay, int, 0444); 168 static int gp_init_delay; 169 module_param(gp_init_delay, int, 0444); 170 static int gp_cleanup_delay; 171 module_param(gp_cleanup_delay, int, 0444); 172 173 // Add delay to rcu_read_unlock() for strict grace periods. 174 static int rcu_unlock_delay; 175 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD 176 module_param(rcu_unlock_delay, int, 0444); 177 #endif 178 179 /* 180 * This rcu parameter is runtime-read-only. It reflects 181 * a minimum allowed number of objects which can be cached 182 * per-CPU. Object size is equal to one page. 
This value 183 * can be changed at boot time. 184 */ 185 static int rcu_min_cached_objs = 5; 186 module_param(rcu_min_cached_objs, int, 0444); 187 188 // A page shrinker can ask for pages to be freed to make them 189 // available for other parts of the system. This usually happens 190 // under low memory conditions, and in that case we should also 191 // defer page-cache filling for a short time period. 192 // 193 // The default value is 5 seconds, which is long enough to reduce 194 // interference with the shrinker while it asks other systems to 195 // drain their caches. 196 static int rcu_delay_page_cache_fill_msec = 5000; 197 module_param(rcu_delay_page_cache_fill_msec, int, 0444); 198 199 /* Retrieve RCU kthreads priority for rcutorture */ 200 int rcu_get_gp_kthreads_prio(void) 201 { 202 return kthread_prio; 203 } 204 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); 205 206 /* 207 * Number of grace periods between delays, normalized by the duration of 208 * the delay. The longer the delay, the more the grace periods between 209 * each delay. The reason for this normalization is that it means that, 210 * for non-zero delays, the overall slowdown of grace periods is constant 211 * regardless of the duration of the delay. This arrangement balances 212 * the need for long delays to increase some race probabilities with the 213 * need for fast grace periods to increase other race probabilities. 214 */ 215 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */ 216 217 /* 218 * Compute the mask of online CPUs for the specified rcu_node structure. 219 * This will not be stable unless the rcu_node structure's ->lock is 220 * held, but the bit corresponding to the current CPU will be stable 221 * in most contexts. 222 */ 223 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 224 { 225 return READ_ONCE(rnp->qsmaskinitnext); 226 } 227 228 /* 229 * Is the CPU corresponding to the specified rcu_data structure online 230 * from RCU's perspective? This perspective is given by that structure's 231 * ->qsmaskinitnext field rather than by the global cpu_online_mask. 232 */ 233 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) 234 { 235 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); 236 } 237 238 /* 239 * Return true if an RCU grace period is in progress. The READ_ONCE()s 240 * permit this function to be invoked without holding the root rcu_node 241 * structure's ->lock, but of course results can be subject to change. 242 */ 243 static int rcu_gp_in_progress(void) 244 { 245 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); 246 } 247 248 /* 249 * Return the number of callbacks queued on the specified CPU. 250 * Handles both the nocbs and normal cases. 251 */ 252 static long rcu_get_n_cbs_cpu(int cpu) 253 { 254 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 255 256 if (rcu_segcblist_is_enabled(&rdp->cblist)) 257 return rcu_segcblist_n_cbs(&rdp->cblist); 258 return 0; 259 } 260 261 void rcu_softirq_qs(void) 262 { 263 rcu_qs(); 264 rcu_preempt_deferred_qs(current); 265 rcu_tasks_qs(current, false); 266 } 267 268 /* 269 * Reset the current CPU's ->dynticks counter to indicate that the 270 * newly onlined CPU is no longer in an extended quiescent state. 271 * This will either leave the counter unchanged, or increment it 272 * to the next non-quiescent value. 
273 * 274 * The non-atomic test/increment sequence works because the upper bits 275 * of the ->dynticks counter are manipulated only by the corresponding CPU, 276 * or when the corresponding CPU is offline. 277 */ 278 static void rcu_dynticks_eqs_online(void) 279 { 280 if (ct_dynticks() & RCU_DYNTICKS_IDX) 281 return; 282 ct_state_inc(RCU_DYNTICKS_IDX); 283 } 284 285 /* 286 * Snapshot the ->dynticks counter with full ordering so as to allow 287 * stable comparison of this counter with past and future snapshots. 288 */ 289 static int rcu_dynticks_snap(int cpu) 290 { 291 smp_mb(); // Fundamental RCU ordering guarantee. 292 return ct_dynticks_cpu_acquire(cpu); 293 } 294 295 /* 296 * Return true if the snapshot returned from rcu_dynticks_snap() 297 * indicates that RCU is in an extended quiescent state. 298 */ 299 static bool rcu_dynticks_in_eqs(int snap) 300 { 301 return !(snap & RCU_DYNTICKS_IDX); 302 } 303 304 /* 305 * Return true if the CPU corresponding to the specified rcu_data 306 * structure has spent some time in an extended quiescent state since 307 * rcu_dynticks_snap() returned the specified snapshot. 308 */ 309 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) 310 { 311 return snap != rcu_dynticks_snap(rdp->cpu); 312 } 313 314 /* 315 * Return true if the referenced integer is zero while the specified 316 * CPU remains within a single extended quiescent state. 317 */ 318 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) 319 { 320 int snap; 321 322 // If not quiescent, force back to earlier extended quiescent state. 323 snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX; 324 smp_rmb(); // Order ->dynticks and *vp reads. 325 if (READ_ONCE(*vp)) 326 return false; // Non-zero, so report failure; 327 smp_rmb(); // Order *vp read and ->dynticks re-read. 328 329 // If still in the same extended quiescent state, we are good! 330 return snap == ct_dynticks_cpu(cpu); 331 } 332 333 /* 334 * Let the RCU core know that this CPU has gone through the scheduler, 335 * which is a quiescent state. This is called when the need for a 336 * quiescent state is urgent, so we burn an atomic operation and full 337 * memory barriers to let the RCU core know about it, regardless of what 338 * this CPU might (or might not) do in the near future. 339 * 340 * We inform the RCU core by emulating a zero-duration dyntick-idle period. 341 * 342 * The caller must have disabled interrupts and must not be idle. 343 */ 344 notrace void rcu_momentary_dyntick_idle(void) 345 { 346 int seq; 347 348 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); 349 seq = ct_state_inc(2 * RCU_DYNTICKS_IDX); 350 /* It is illegal to call this from idle state. */ 351 WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX)); 352 rcu_preempt_deferred_qs(current); 353 } 354 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle); 355 356 /** 357 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle 358 * 359 * If the current CPU is idle and running at a first-level (not nested) 360 * interrupt, or directly, from idle, return true. 361 * 362 * The caller must have at least disabled IRQs. 363 */ 364 static int rcu_is_cpu_rrupt_from_idle(void) 365 { 366 long nesting; 367 368 /* 369 * Usually called from the tick; but also used from smp_function_call() 370 * for expedited grace periods. This latter can result in running from 371 * the idle task, instead of an actual IPI. 
372 */ 373 lockdep_assert_irqs_disabled(); 374 375 /* Check for counter underflows */ 376 RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0, 377 "RCU dynticks_nesting counter underflow!"); 378 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0, 379 "RCU dynticks_nmi_nesting counter underflow/zero!"); 380 381 /* Are we at first interrupt nesting level? */ 382 nesting = ct_dynticks_nmi_nesting(); 383 if (nesting > 1) 384 return false; 385 386 /* 387 * If we're not in an interrupt, we must be in the idle task! 388 */ 389 WARN_ON_ONCE(!nesting && !is_idle_task(current)); 390 391 /* Does CPU appear to be idle from an RCU standpoint? */ 392 return ct_dynticks_nesting() == 0; 393 } 394 395 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10) 396 // Maximum callbacks per rcu_do_batch ... 397 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood. 398 static long blimit = DEFAULT_RCU_BLIMIT; 399 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit. 400 static long qhimark = DEFAULT_RCU_QHIMARK; 401 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit. 402 static long qlowmark = DEFAULT_RCU_QLOMARK; 403 #define DEFAULT_RCU_QOVLD_MULT 2 404 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK) 405 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS. 406 static long qovld_calc = -1; // No pre-initialization lock acquisitions! 407 408 module_param(blimit, long, 0444); 409 module_param(qhimark, long, 0444); 410 module_param(qlowmark, long, 0444); 411 module_param(qovld, long, 0444); 412 413 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX; 414 static ulong jiffies_till_next_fqs = ULONG_MAX; 415 static bool rcu_kick_kthreads; 416 static int rcu_divisor = 7; 417 module_param(rcu_divisor, int, 0644); 418 419 /* Force an exit from rcu_do_batch() after 3 milliseconds. */ 420 static long rcu_resched_ns = 3 * NSEC_PER_MSEC; 421 module_param(rcu_resched_ns, long, 0644); 422 423 /* 424 * How long the grace period must be before we start recruiting 425 * quiescent-state help from rcu_note_context_switch(). 426 */ 427 static ulong jiffies_till_sched_qs = ULONG_MAX; 428 module_param(jiffies_till_sched_qs, ulong, 0444); 429 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */ 430 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */ 431 432 /* 433 * Make sure that we give the grace-period kthread time to detect any 434 * idle CPUs before taking active measures to force quiescent states. 435 * However, don't go below 100 milliseconds, adjusted upwards for really 436 * large systems. 437 */ 438 static void adjust_jiffies_till_sched_qs(void) 439 { 440 unsigned long j; 441 442 /* If jiffies_till_sched_qs was specified, respect the request. */ 443 if (jiffies_till_sched_qs != ULONG_MAX) { 444 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); 445 return; 446 } 447 /* Otherwise, set to third fqs scan, but bound below on large system. 
*/ 448 j = READ_ONCE(jiffies_till_first_fqs) + 449 2 * READ_ONCE(jiffies_till_next_fqs); 450 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) 451 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 452 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); 453 WRITE_ONCE(jiffies_to_sched_qs, j); 454 } 455 456 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp) 457 { 458 ulong j; 459 int ret = kstrtoul(val, 0, &j); 460 461 if (!ret) { 462 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); 463 adjust_jiffies_till_sched_qs(); 464 } 465 return ret; 466 } 467 468 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp) 469 { 470 ulong j; 471 int ret = kstrtoul(val, 0, &j); 472 473 if (!ret) { 474 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); 475 adjust_jiffies_till_sched_qs(); 476 } 477 return ret; 478 } 479 480 static const struct kernel_param_ops first_fqs_jiffies_ops = { 481 .set = param_set_first_fqs_jiffies, 482 .get = param_get_ulong, 483 }; 484 485 static const struct kernel_param_ops next_fqs_jiffies_ops = { 486 .set = param_set_next_fqs_jiffies, 487 .get = param_get_ulong, 488 }; 489 490 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644); 491 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644); 492 module_param(rcu_kick_kthreads, bool, 0644); 493 494 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); 495 static int rcu_pending(int user); 496 497 /* 498 * Return the number of RCU GPs completed thus far for debug & stats. 499 */ 500 unsigned long rcu_get_gp_seq(void) 501 { 502 return READ_ONCE(rcu_state.gp_seq); 503 } 504 EXPORT_SYMBOL_GPL(rcu_get_gp_seq); 505 506 /* 507 * Return the number of RCU expedited batches completed thus far for 508 * debug & stats. Odd numbers mean that a batch is in progress, even 509 * numbers mean idle. The value returned will thus be roughly double 510 * the cumulative batches since boot. 511 */ 512 unsigned long rcu_exp_batches_completed(void) 513 { 514 return rcu_state.expedited_sequence; 515 } 516 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); 517 518 /* 519 * Return the root node of the rcu_state structure. 520 */ 521 static struct rcu_node *rcu_get_root(void) 522 { 523 return &rcu_state.node[0]; 524 } 525 526 /* 527 * Send along grace-period-related data for rcutorture diagnostics. 528 */ 529 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 530 unsigned long *gp_seq) 531 { 532 switch (test_type) { 533 case RCU_FLAVOR: 534 *flags = READ_ONCE(rcu_state.gp_flags); 535 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); 536 break; 537 default: 538 break; 539 } 540 } 541 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 542 543 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) 544 /* 545 * An empty function that will trigger a reschedule on 546 * IRQ tail once IRQs get re-enabled on userspace/guest resume. 547 */ 548 static void late_wakeup_func(struct irq_work *work) 549 { 550 } 551 552 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) = 553 IRQ_WORK_INIT(late_wakeup_func); 554 555 /* 556 * If either: 557 * 558 * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work 559 * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry. 
560 * 561 * In these cases the late RCU wake ups aren't supported in the resched loops and our 562 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs 563 * get re-enabled again. 564 */ 565 noinstr void rcu_irq_work_resched(void) 566 { 567 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 568 569 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU)) 570 return; 571 572 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU)) 573 return; 574 575 instrumentation_begin(); 576 if (do_nocb_deferred_wakeup(rdp) && need_resched()) { 577 irq_work_queue(this_cpu_ptr(&late_wakeup_work)); 578 } 579 instrumentation_end(); 580 } 581 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */ 582 583 #ifdef CONFIG_PROVE_RCU 584 /** 585 * rcu_irq_exit_check_preempt - Validate that scheduling is possible 586 */ 587 void rcu_irq_exit_check_preempt(void) 588 { 589 lockdep_assert_irqs_disabled(); 590 591 RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0, 592 "RCU dynticks_nesting counter underflow/zero!"); 593 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() != 594 DYNTICK_IRQ_NONIDLE, 595 "Bad RCU dynticks_nmi_nesting counter\n"); 596 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), 597 "RCU in extended quiescent state!"); 598 } 599 #endif /* #ifdef CONFIG_PROVE_RCU */ 600 601 #ifdef CONFIG_NO_HZ_FULL 602 /** 603 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it. 604 * 605 * The scheduler tick is not normally enabled when CPUs enter the kernel 606 * from nohz_full userspace execution. After all, nohz_full userspace 607 * execution is an RCU quiescent state and the time executing in the kernel 608 * is quite short. Except of course when it isn't. And it is not hard to 609 * cause a large system to spend tens of seconds or even minutes looping 610 * in the kernel, which can cause a number of problems, include RCU CPU 611 * stall warnings. 612 * 613 * Therefore, if a nohz_full CPU fails to report a quiescent state 614 * in a timely manner, the RCU grace-period kthread sets that CPU's 615 * ->rcu_urgent_qs flag with the expectation that the next interrupt or 616 * exception will invoke this function, which will turn on the scheduler 617 * tick, which will enable RCU to detect that CPU's quiescent states, 618 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels. 619 * The tick will be disabled once a quiescent state is reported for 620 * this CPU. 621 * 622 * Of course, in carefully tuned systems, there might never be an 623 * interrupt or exception. In that case, the RCU grace-period kthread 624 * will eventually cause one to happen. However, in less carefully 625 * controlled environments, this function allows RCU to get what it 626 * needs without creating otherwise useless interruptions. 627 */ 628 void __rcu_irq_enter_check_tick(void) 629 { 630 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 631 632 // If we're here from NMI there's nothing to do. 633 if (in_nmi()) 634 return; 635 636 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), 637 "Illegal rcu_irq_enter_check_tick() from extended quiescent state"); 638 639 if (!tick_nohz_full_cpu(rdp->cpu) || 640 !READ_ONCE(rdp->rcu_urgent_qs) || 641 READ_ONCE(rdp->rcu_forced_tick)) { 642 // RCU doesn't need nohz_full help from this CPU, or it is 643 // already getting that help. 644 return; 645 } 646 647 // We get here only when not in an extended quiescent state and 648 // from interrupts (as opposed to NMIs). 
Therefore, (1) RCU is 649 // already watching and (2) The fact that we are in an interrupt 650 // handler and that the rcu_node lock is an irq-disabled lock 651 // prevents self-deadlock. So we can safely recheck under the lock. 652 // Note that the nohz_full state currently cannot change. 653 raw_spin_lock_rcu_node(rdp->mynode); 654 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) { 655 // A nohz_full CPU is in the kernel and RCU needs a 656 // quiescent state. Turn on the tick! 657 WRITE_ONCE(rdp->rcu_forced_tick, true); 658 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 659 } 660 raw_spin_unlock_rcu_node(rdp->mynode); 661 } 662 #endif /* CONFIG_NO_HZ_FULL */ 663 664 /* 665 * Check to see if any future non-offloaded RCU-related work will need 666 * to be done by the current CPU, even if none need be done immediately, 667 * returning 1 if so. This function is part of the RCU implementation; 668 * it is -not- an exported member of the RCU API. This is used by 669 * the idle-entry code to figure out whether it is safe to disable the 670 * scheduler-clock interrupt. 671 * 672 * Just check whether or not this CPU has non-offloaded RCU callbacks 673 * queued. 674 */ 675 int rcu_needs_cpu(void) 676 { 677 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && 678 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); 679 } 680 681 /* 682 * If any sort of urgency was applied to the current CPU (for example, 683 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order 684 * to get to a quiescent state, disable it. 685 */ 686 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) 687 { 688 raw_lockdep_assert_held_rcu_node(rdp->mynode); 689 WRITE_ONCE(rdp->rcu_urgent_qs, false); 690 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); 691 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { 692 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 693 WRITE_ONCE(rdp->rcu_forced_tick, false); 694 } 695 } 696 697 /** 698 * rcu_is_watching - see if RCU thinks that the current CPU is not idle 699 * 700 * Return true if RCU is watching the running CPU, which means that this 701 * CPU can safely enter RCU read-side critical sections. In other words, 702 * if the current CPU is not in its idle loop or is in an interrupt or 703 * NMI handler, return true. 704 * 705 * Make notrace because it can be called by the internal functions of 706 * ftrace, and making this notrace removes unnecessary recursion calls. 707 */ 708 notrace bool rcu_is_watching(void) 709 { 710 bool ret; 711 712 preempt_disable_notrace(); 713 ret = !rcu_dynticks_curr_cpu_in_eqs(); 714 preempt_enable_notrace(); 715 return ret; 716 } 717 EXPORT_SYMBOL_GPL(rcu_is_watching); 718 719 /* 720 * If a holdout task is actually running, request an urgent quiescent 721 * state from its CPU. This is unsynchronized, so migrations can cause 722 * the request to go to the wrong CPU. Which is OK, all that will happen 723 * is that the CPU's next context switch will be a bit slower and next 724 * time around this task will generate another request. 725 */ 726 void rcu_request_urgent_qs_task(struct task_struct *t) 727 { 728 int cpu; 729 730 barrier(); 731 cpu = task_cpu(t); 732 if (!task_curr(t)) 733 return; /* This task is not running on that CPU. */ 734 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); 735 } 736 737 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 738 739 /* 740 * Is the current CPU online as far as RCU is concerned? 
741 * 742 * Disable preemption to avoid false positives that could otherwise 743 * happen due to the current CPU number being sampled, this task being 744 * preempted, its old CPU being taken offline, resuming on some other CPU, 745 * then determining that its old CPU is now offline. 746 * 747 * Disable checking if in an NMI handler because we cannot safely 748 * report errors from NMI handlers anyway. In addition, it is OK to use 749 * RCU on an offline processor during initial boot, hence the check for 750 * rcu_scheduler_fully_active. 751 */ 752 bool rcu_lockdep_current_cpu_online(void) 753 { 754 struct rcu_data *rdp; 755 bool ret = false; 756 757 if (in_nmi() || !rcu_scheduler_fully_active) 758 return true; 759 preempt_disable_notrace(); 760 rdp = this_cpu_ptr(&rcu_data); 761 /* 762 * Strictly, we care here about the case where the current CPU is 763 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask 764 * not being up to date. So arch_spin_is_locked() might have a 765 * false positive if it's held by some *other* CPU, but that's 766 * OK because that just means a false *negative* on the warning. 767 */ 768 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) 769 ret = true; 770 preempt_enable_notrace(); 771 return ret; 772 } 773 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 774 775 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 776 777 /* 778 * When trying to report a quiescent state on behalf of some other CPU, 779 * it is our responsibility to check for and handle potential overflow 780 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters. 781 * After all, the CPU might be in deep idle state, and thus executing no 782 * code whatsoever. 783 */ 784 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) 785 { 786 raw_lockdep_assert_held_rcu_node(rnp); 787 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, 788 rnp->gp_seq)) 789 WRITE_ONCE(rdp->gpwrap, true); 790 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) 791 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; 792 } 793 794 /* 795 * Snapshot the specified CPU's dynticks counter so that we can later 796 * credit them with an implicit quiescent state. Return 1 if this CPU 797 * is in dynticks idle mode, which is an extended quiescent state. 798 */ 799 static int dyntick_save_progress_counter(struct rcu_data *rdp) 800 { 801 rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu); 802 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { 803 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 804 rcu_gpnum_ovf(rdp->mynode, rdp); 805 return 1; 806 } 807 return 0; 808 } 809 810 /* 811 * Return true if the specified CPU has passed through a quiescent 812 * state by virtue of being in or having passed through an dynticks 813 * idle state since the last call to dyntick_save_progress_counter() 814 * for this same CPU, or by virtue of having been offline. 815 */ 816 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) 817 { 818 unsigned long jtsq; 819 struct rcu_node *rnp = rdp->mynode; 820 821 /* 822 * If the CPU passed through or entered a dynticks idle phase with 823 * no active irq/NMI handlers, then we can safely pretend that the CPU 824 * already acknowledged the request to pass through a quiescent 825 * state. Either way, that CPU cannot possibly be in an RCU 826 * read-side critical section that started before the beginning 827 * of the current RCU grace period. 
828 */ 829 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { 830 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 831 rcu_gpnum_ovf(rnp, rdp); 832 return 1; 833 } 834 835 /* 836 * Complain if a CPU that is considered to be offline from RCU's 837 * perspective has not yet reported a quiescent state. After all, 838 * the offline CPU should have reported a quiescent state during 839 * the CPU-offline process, or, failing that, by rcu_gp_init() 840 * if it ran concurrently with either the CPU going offline or the 841 * last task on a leaf rcu_node structure exiting its RCU read-side 842 * critical section while all CPUs corresponding to that structure 843 * are offline. This added warning detects bugs in any of these 844 * code paths. 845 * 846 * The rcu_node structure's ->lock is held here, which excludes 847 * the relevant portions the CPU-hotplug code, the grace-period 848 * initialization code, and the rcu_read_unlock() code paths. 849 * 850 * For more detail, please refer to the "Hotplug CPU" section 851 * of RCU's Requirements documentation. 852 */ 853 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) { 854 struct rcu_node *rnp1; 855 856 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", 857 __func__, rnp->grplo, rnp->grphi, rnp->level, 858 (long)rnp->gp_seq, (long)rnp->completedqs); 859 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) 860 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n", 861 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask); 862 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n", 863 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], 864 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, 865 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); 866 return 1; /* Break things loose after complaining. */ 867 } 868 869 /* 870 * A CPU running for an extended time within the kernel can 871 * delay RCU grace periods: (1) At age jiffies_to_sched_qs, 872 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set 873 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the 874 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs 875 * variable are safe because the assignments are repeated if this 876 * CPU failed to pass through a quiescent state. This code 877 * also checks .jiffies_resched in case jiffies_to_sched_qs 878 * is set way high. 879 */ 880 jtsq = READ_ONCE(jiffies_to_sched_qs); 881 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && 882 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || 883 time_after(jiffies, rcu_state.jiffies_resched) || 884 rcu_state.cbovld)) { 885 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); 886 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ 887 smp_store_release(&rdp->rcu_urgent_qs, true); 888 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { 889 WRITE_ONCE(rdp->rcu_urgent_qs, true); 890 } 891 892 /* 893 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! 894 * The above code handles this, but only for straight cond_resched(). 895 * And some in-kernel loops check need_resched() before calling 896 * cond_resched(), which defeats the above code for CPUs that are 897 * running in-kernel with scheduling-clock interrupts disabled. 898 * So hit them over the head with the resched_cpu() hammer! 
899 */ 900 if (tick_nohz_full_cpu(rdp->cpu) && 901 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || 902 rcu_state.cbovld)) { 903 WRITE_ONCE(rdp->rcu_urgent_qs, true); 904 resched_cpu(rdp->cpu); 905 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 906 } 907 908 /* 909 * If more than halfway to RCU CPU stall-warning time, invoke 910 * resched_cpu() more frequently to try to loosen things up a bit. 911 * Also check to see if the CPU is getting hammered with interrupts, 912 * but only once per grace period, just to keep the IPIs down to 913 * a dull roar. 914 */ 915 if (time_after(jiffies, rcu_state.jiffies_resched)) { 916 if (time_after(jiffies, 917 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { 918 resched_cpu(rdp->cpu); 919 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 920 } 921 if (IS_ENABLED(CONFIG_IRQ_WORK) && 922 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && 923 (rnp->ffmask & rdp->grpmask)) { 924 rdp->rcu_iw_pending = true; 925 rdp->rcu_iw_gp_seq = rnp->gp_seq; 926 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); 927 } 928 } 929 930 return 0; 931 } 932 933 /* Trace-event wrapper function for trace_rcu_future_grace_period. */ 934 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, 935 unsigned long gp_seq_req, const char *s) 936 { 937 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 938 gp_seq_req, rnp->level, 939 rnp->grplo, rnp->grphi, s); 940 } 941 942 /* 943 * rcu_start_this_gp - Request the start of a particular grace period 944 * @rnp_start: The leaf node of the CPU from which to start. 945 * @rdp: The rcu_data corresponding to the CPU from which to start. 946 * @gp_seq_req: The gp_seq of the grace period to start. 947 * 948 * Start the specified grace period, as needed to handle newly arrived 949 * callbacks. The required future grace periods are recorded in each 950 * rcu_node structure's ->gp_seq_needed field. Returns true if there 951 * is reason to awaken the grace-period kthread. 952 * 953 * The caller must hold the specified rcu_node structure's ->lock, which 954 * is why the caller is responsible for waking the grace-period kthread. 955 * 956 * Returns true if the GP thread needs to be awakened else false. 957 */ 958 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, 959 unsigned long gp_seq_req) 960 { 961 bool ret = false; 962 struct rcu_node *rnp; 963 964 /* 965 * Use funnel locking to either acquire the root rcu_node 966 * structure's lock or bail out if the need for this grace period 967 * has already been recorded -- or if that grace period has in 968 * fact already started. If there is already a grace period in 969 * progress in a non-leaf node, no recording is needed because the 970 * end of the grace period will scan the leaf rcu_node structures. 971 * Note that rnp_start->lock must not be released. 
972 */ 973 raw_lockdep_assert_held_rcu_node(rnp_start); 974 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); 975 for (rnp = rnp_start; 1; rnp = rnp->parent) { 976 if (rnp != rnp_start) 977 raw_spin_lock_rcu_node(rnp); 978 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || 979 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || 980 (rnp != rnp_start && 981 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { 982 trace_rcu_this_gp(rnp, rdp, gp_seq_req, 983 TPS("Prestarted")); 984 goto unlock_out; 985 } 986 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); 987 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { 988 /* 989 * We just marked the leaf or internal node, and a 990 * grace period is in progress, which means that 991 * rcu_gp_cleanup() will see the marking. Bail to 992 * reduce contention. 993 */ 994 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, 995 TPS("Startedleaf")); 996 goto unlock_out; 997 } 998 if (rnp != rnp_start && rnp->parent != NULL) 999 raw_spin_unlock_rcu_node(rnp); 1000 if (!rnp->parent) 1001 break; /* At root, and perhaps also leaf. */ 1002 } 1003 1004 /* If GP already in progress, just leave, otherwise start one. */ 1005 if (rcu_gp_in_progress()) { 1006 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); 1007 goto unlock_out; 1008 } 1009 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); 1010 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT); 1011 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 1012 if (!READ_ONCE(rcu_state.gp_kthread)) { 1013 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); 1014 goto unlock_out; 1015 } 1016 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq")); 1017 ret = true; /* Caller must wake GP kthread. */ 1018 unlock_out: 1019 /* Push furthest requested GP to leaf node and rcu_data structure. */ 1020 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { 1021 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); 1022 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1023 } 1024 if (rnp != rnp_start) 1025 raw_spin_unlock_rcu_node(rnp); 1026 return ret; 1027 } 1028 1029 /* 1030 * Clean up any old requests for the just-ended grace period. Also return 1031 * whether any additional grace periods have been requested. 1032 */ 1033 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) 1034 { 1035 bool needmore; 1036 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1037 1038 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); 1039 if (!needmore) 1040 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ 1041 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, 1042 needmore ? TPS("CleanupMore") : TPS("Cleanup")); 1043 return needmore; 1044 } 1045 1046 /* 1047 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an 1048 * interrupt or softirq handler, in which case we just might immediately 1049 * sleep upon return, resulting in a grace-period hang), and don't bother 1050 * awakening when there is nothing for the grace-period kthread to do 1051 * (as in several CPUs raced to awaken, we lost), and finally don't try 1052 * to awaken a kthread that has not yet been created. If all those checks 1053 * are passed, track some debug information and awaken. 1054 * 1055 * So why do the self-wakeup when in an interrupt or softirq handler 1056 * in the grace-period kthread's context? Because the kthread might have 1057 * been interrupted just as it was going to sleep, and just after the final 1058 * pre-sleep check of the awaken condition. 
In this case, a wakeup really 1059 * is required, and is therefore supplied. 1060 */ 1061 static void rcu_gp_kthread_wake(void) 1062 { 1063 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread); 1064 1065 if ((current == t && !in_hardirq() && !in_serving_softirq()) || 1066 !READ_ONCE(rcu_state.gp_flags) || !t) 1067 return; 1068 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); 1069 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); 1070 swake_up_one(&rcu_state.gp_wq); 1071 } 1072 1073 /* 1074 * If there is room, assign a ->gp_seq number to any callbacks on this 1075 * CPU that have not already been assigned. Also accelerate any callbacks 1076 * that were previously assigned a ->gp_seq number that has since proven 1077 * to be too conservative, which can happen if callbacks get assigned a 1078 * ->gp_seq number while RCU is idle, but with reference to a non-root 1079 * rcu_node structure. This function is idempotent, so it does not hurt 1080 * to call it repeatedly. Returns an flag saying that we should awaken 1081 * the RCU grace-period kthread. 1082 * 1083 * The caller must hold rnp->lock with interrupts disabled. 1084 */ 1085 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1086 { 1087 unsigned long gp_seq_req; 1088 bool ret = false; 1089 1090 rcu_lockdep_assert_cblist_protected(rdp); 1091 raw_lockdep_assert_held_rcu_node(rnp); 1092 1093 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1094 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1095 return false; 1096 1097 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); 1098 1099 /* 1100 * Callbacks are often registered with incomplete grace-period 1101 * information. Something about the fact that getting exact 1102 * information requires acquiring a global lock... RCU therefore 1103 * makes a conservative estimate of the grace period number at which 1104 * a given callback will become ready to invoke. The following 1105 * code checks this estimate and improves it when possible, thus 1106 * accelerating callback invocation to an earlier grace-period 1107 * number. 1108 */ 1109 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); 1110 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) 1111 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); 1112 1113 /* Trace depending on how much we were able to accelerate. */ 1114 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) 1115 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB")); 1116 else 1117 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB")); 1118 1119 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); 1120 1121 return ret; 1122 } 1123 1124 /* 1125 * Similar to rcu_accelerate_cbs(), but does not require that the leaf 1126 * rcu_node structure's ->lock be held. It consults the cached value 1127 * of ->gp_seq_needed in the rcu_data structure, and if that indicates 1128 * that a new grace-period request be made, invokes rcu_accelerate_cbs() 1129 * while holding the leaf rcu_node structure's ->lock. 1130 */ 1131 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, 1132 struct rcu_data *rdp) 1133 { 1134 unsigned long c; 1135 bool needwake; 1136 1137 rcu_lockdep_assert_cblist_protected(rdp); 1138 c = rcu_seq_snap(&rcu_state.gp_seq); 1139 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1140 /* Old request still live, so mark recent callbacks. 
*/ 1141 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1142 return; 1143 } 1144 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1145 needwake = rcu_accelerate_cbs(rnp, rdp); 1146 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1147 if (needwake) 1148 rcu_gp_kthread_wake(); 1149 } 1150 1151 /* 1152 * Move any callbacks whose grace period has completed to the 1153 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1154 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1155 * sublist. This function is idempotent, so it does not hurt to 1156 * invoke it repeatedly. As long as it is not invoked -too- often... 1157 * Returns true if the RCU grace-period kthread needs to be awakened. 1158 * 1159 * The caller must hold rnp->lock with interrupts disabled. 1160 */ 1161 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1162 { 1163 rcu_lockdep_assert_cblist_protected(rdp); 1164 raw_lockdep_assert_held_rcu_node(rnp); 1165 1166 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1167 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1168 return false; 1169 1170 /* 1171 * Find all callbacks whose ->gp_seq numbers indicate that they 1172 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1173 */ 1174 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1175 1176 /* Classify any remaining callbacks. */ 1177 return rcu_accelerate_cbs(rnp, rdp); 1178 } 1179 1180 /* 1181 * Move and classify callbacks, but only if doing so won't require 1182 * that the RCU grace-period kthread be awakened. 1183 */ 1184 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1185 struct rcu_data *rdp) 1186 { 1187 rcu_lockdep_assert_cblist_protected(rdp); 1188 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) 1189 return; 1190 // The grace period cannot end while we hold the rcu_node lock. 1191 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) 1192 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1193 raw_spin_unlock_rcu_node(rnp); 1194 } 1195 1196 /* 1197 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a 1198 * quiescent state. This is intended to be invoked when the CPU notices 1199 * a new grace period. 1200 */ 1201 static void rcu_strict_gp_check_qs(void) 1202 { 1203 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) { 1204 rcu_read_lock(); 1205 rcu_read_unlock(); 1206 } 1207 } 1208 1209 /* 1210 * Update CPU-local rcu_data state to record the beginnings and ends of 1211 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1212 * structure corresponding to the current CPU, and must have irqs disabled. 1213 * Returns true if the grace-period kthread needs to be awakened. 1214 */ 1215 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1216 { 1217 bool ret = false; 1218 bool need_qs; 1219 const bool offloaded = rcu_rdp_is_offloaded(rdp); 1220 1221 raw_lockdep_assert_held_rcu_node(rnp); 1222 1223 if (rdp->gp_seq == rnp->gp_seq) 1224 return false; /* Nothing to do. */ 1225 1226 /* Handle the ends of any preceding grace periods first. */ 1227 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1228 unlikely(READ_ONCE(rdp->gpwrap))) { 1229 if (!offloaded) 1230 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ 1231 rdp->core_needs_qs = false; 1232 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1233 } else { 1234 if (!offloaded) 1235 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. 
*/ 1236 if (rdp->core_needs_qs) 1237 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); 1238 } 1239 1240 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ 1241 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1242 unlikely(READ_ONCE(rdp->gpwrap))) { 1243 /* 1244 * If the current grace period is waiting for this CPU, 1245 * set up to detect a quiescent state, otherwise don't 1246 * go looking for one. 1247 */ 1248 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1249 need_qs = !!(rnp->qsmask & rdp->grpmask); 1250 rdp->cpu_no_qs.b.norm = need_qs; 1251 rdp->core_needs_qs = need_qs; 1252 zero_cpu_stall_ticks(rdp); 1253 } 1254 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1255 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1256 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1257 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap)) 1258 WRITE_ONCE(rdp->last_sched_clock, jiffies); 1259 WRITE_ONCE(rdp->gpwrap, false); 1260 rcu_gpnum_ovf(rnp, rdp); 1261 return ret; 1262 } 1263 1264 static void note_gp_changes(struct rcu_data *rdp) 1265 { 1266 unsigned long flags; 1267 bool needwake; 1268 struct rcu_node *rnp; 1269 1270 local_irq_save(flags); 1271 rnp = rdp->mynode; 1272 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1273 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1274 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ 1275 local_irq_restore(flags); 1276 return; 1277 } 1278 needwake = __note_gp_changes(rnp, rdp); 1279 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1280 rcu_strict_gp_check_qs(); 1281 if (needwake) 1282 rcu_gp_kthread_wake(); 1283 } 1284 1285 static atomic_t *rcu_gp_slow_suppress; 1286 1287 /* Register a counter to suppress debugging grace-period delays. */ 1288 void rcu_gp_slow_register(atomic_t *rgssp) 1289 { 1290 WARN_ON_ONCE(rcu_gp_slow_suppress); 1291 1292 WRITE_ONCE(rcu_gp_slow_suppress, rgssp); 1293 } 1294 EXPORT_SYMBOL_GPL(rcu_gp_slow_register); 1295 1296 /* Unregister a counter, with NULL for not caring which. */ 1297 void rcu_gp_slow_unregister(atomic_t *rgssp) 1298 { 1299 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress); 1300 1301 WRITE_ONCE(rcu_gp_slow_suppress, NULL); 1302 } 1303 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister); 1304 1305 static bool rcu_gp_slow_is_suppressed(void) 1306 { 1307 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress); 1308 1309 return rgssp && atomic_read(rgssp); 1310 } 1311 1312 static void rcu_gp_slow(int delay) 1313 { 1314 if (!rcu_gp_slow_is_suppressed() && delay > 0 && 1315 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1316 schedule_timeout_idle(delay); 1317 } 1318 1319 static unsigned long sleep_duration; 1320 1321 /* Allow rcutorture to stall the grace-period kthread. */ 1322 void rcu_gp_set_torture_wait(int duration) 1323 { 1324 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0) 1325 WRITE_ONCE(sleep_duration, duration); 1326 } 1327 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait); 1328 1329 /* Actually implement the aforementioned wait. 
*/ 1330 static void rcu_gp_torture_wait(void) 1331 { 1332 unsigned long duration; 1333 1334 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST)) 1335 return; 1336 duration = xchg(&sleep_duration, 0UL); 1337 if (duration > 0) { 1338 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration); 1339 schedule_timeout_idle(duration); 1340 pr_alert("%s: Wait complete\n", __func__); 1341 } 1342 } 1343 1344 /* 1345 * Handler for on_each_cpu() to invoke the target CPU's RCU core 1346 * processing. 1347 */ 1348 static void rcu_strict_gp_boundary(void *unused) 1349 { 1350 invoke_rcu_core(); 1351 } 1352 1353 // Has rcu_init() been invoked? This is used (for example) to determine 1354 // whether spinlocks may be acquired safely. 1355 static bool rcu_init_invoked(void) 1356 { 1357 return !!rcu_state.n_online_cpus; 1358 } 1359 1360 // Make the polled API aware of the beginning of a grace period. 1361 static void rcu_poll_gp_seq_start(unsigned long *snap) 1362 { 1363 struct rcu_node *rnp = rcu_get_root(); 1364 1365 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1366 raw_lockdep_assert_held_rcu_node(rnp); 1367 1368 // If RCU was idle, note beginning of GP. 1369 if (!rcu_seq_state(rcu_state.gp_seq_polled)) 1370 rcu_seq_start(&rcu_state.gp_seq_polled); 1371 1372 // Either way, record current state. 1373 *snap = rcu_state.gp_seq_polled; 1374 } 1375 1376 // Make the polled API aware of the end of a grace period. 1377 static void rcu_poll_gp_seq_end(unsigned long *snap) 1378 { 1379 struct rcu_node *rnp = rcu_get_root(); 1380 1381 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1382 raw_lockdep_assert_held_rcu_node(rnp); 1383 1384 // If the previously noted GP is still in effect, record the 1385 // end of that GP. Either way, zero counter to avoid counter-wrap 1386 // problems. 1387 if (*snap && *snap == rcu_state.gp_seq_polled) { 1388 rcu_seq_end(&rcu_state.gp_seq_polled); 1389 rcu_state.gp_seq_polled_snap = 0; 1390 rcu_state.gp_seq_polled_exp_snap = 0; 1391 } else { 1392 *snap = 0; 1393 } 1394 } 1395 1396 // Make the polled API aware of the beginning of a grace period, but 1397 // where caller does not hold the root rcu_node structure's lock. 1398 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap) 1399 { 1400 unsigned long flags; 1401 struct rcu_node *rnp = rcu_get_root(); 1402 1403 if (rcu_init_invoked()) { 1404 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1405 lockdep_assert_irqs_enabled(); 1406 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1407 } 1408 rcu_poll_gp_seq_start(snap); 1409 if (rcu_init_invoked()) 1410 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1411 } 1412 1413 // Make the polled API aware of the end of a grace period, but where 1414 // caller does not hold the root rcu_node structure's lock. 1415 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap) 1416 { 1417 unsigned long flags; 1418 struct rcu_node *rnp = rcu_get_root(); 1419 1420 if (rcu_init_invoked()) { 1421 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1422 lockdep_assert_irqs_enabled(); 1423 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1424 } 1425 rcu_poll_gp_seq_end(snap); 1426 if (rcu_init_invoked()) 1427 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1428 } 1429 1430 /* 1431 * Initialize a new grace period. Return false if no grace period required. 
1432 */ 1433 static noinline_for_stack bool rcu_gp_init(void) 1434 { 1435 unsigned long flags; 1436 unsigned long oldmask; 1437 unsigned long mask; 1438 struct rcu_data *rdp; 1439 struct rcu_node *rnp = rcu_get_root(); 1440 1441 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1442 raw_spin_lock_irq_rcu_node(rnp); 1443 if (!READ_ONCE(rcu_state.gp_flags)) { 1444 /* Spurious wakeup, tell caller to go back to sleep. */ 1445 raw_spin_unlock_irq_rcu_node(rnp); 1446 return false; 1447 } 1448 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ 1449 1450 if (WARN_ON_ONCE(rcu_gp_in_progress())) { 1451 /* 1452 * Grace period already in progress, don't start another. 1453 * Not supposed to be able to happen. 1454 */ 1455 raw_spin_unlock_irq_rcu_node(rnp); 1456 return false; 1457 } 1458 1459 /* Advance to a new grace period and initialize state. */ 1460 record_gp_stall_check_time(); 1461 /* Record GP times before starting GP, hence rcu_seq_start(). */ 1462 rcu_seq_start(&rcu_state.gp_seq); 1463 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 1464 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); 1465 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap); 1466 raw_spin_unlock_irq_rcu_node(rnp); 1467 1468 /* 1469 * Apply per-leaf buffered online and offline operations to 1470 * the rcu_node tree. Note that this new grace period need not 1471 * wait for subsequent online CPUs, and that RCU hooks in the CPU 1472 * offlining path, when combined with checks in this function, 1473 * will handle CPUs that are currently going offline or that will 1474 * go offline later. Please also refer to "Hotplug CPU" section 1475 * of RCU's Requirements documentation. 1476 */ 1477 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF); 1478 /* Exclude CPU hotplug operations. */ 1479 rcu_for_each_leaf_node(rnp) { 1480 local_irq_save(flags); 1481 arch_spin_lock(&rcu_state.ofl_lock); 1482 raw_spin_lock_rcu_node(rnp); 1483 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1484 !rnp->wait_blkd_tasks) { 1485 /* Nothing to do on this leaf rcu_node structure. */ 1486 raw_spin_unlock_rcu_node(rnp); 1487 arch_spin_unlock(&rcu_state.ofl_lock); 1488 local_irq_restore(flags); 1489 continue; 1490 } 1491 1492 /* Record old state, apply changes to ->qsmaskinit field. */ 1493 oldmask = rnp->qsmaskinit; 1494 rnp->qsmaskinit = rnp->qsmaskinitnext; 1495 1496 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1497 if (!oldmask != !rnp->qsmaskinit) { 1498 if (!oldmask) { /* First online CPU for rcu_node. */ 1499 if (!rnp->wait_blkd_tasks) /* Ever offline? */ 1500 rcu_init_new_rnp(rnp); 1501 } else if (rcu_preempt_has_tasks(rnp)) { 1502 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1503 } else { /* Last offline CPU and can propagate. */ 1504 rcu_cleanup_dead_rnp(rnp); 1505 } 1506 } 1507 1508 /* 1509 * If all waited-on tasks from prior grace period are 1510 * done, and if all this rcu_node structure's CPUs are 1511 * still offline, propagate up the rcu_node tree and 1512 * clear ->wait_blkd_tasks. Otherwise, if one of this 1513 * rcu_node structure's CPUs has since come back online, 1514 * simply clear ->wait_blkd_tasks. 1515 */ 1516 if (rnp->wait_blkd_tasks && 1517 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1518 rnp->wait_blkd_tasks = false; 1519 if (!rnp->qsmaskinit) 1520 rcu_cleanup_dead_rnp(rnp); 1521 } 1522 1523 raw_spin_unlock_rcu_node(rnp); 1524 arch_spin_unlock(&rcu_state.ofl_lock); 1525 local_irq_restore(flags); 1526 } 1527 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. 
*/ 1528 1529 /* 1530 * Set the quiescent-state-needed bits in all the rcu_node 1531 * structures for all currently online CPUs in breadth-first 1532 * order, starting from the root rcu_node structure, relying on the 1533 * layout of the tree within the rcu_state.node[] array. Note that 1534 * other CPUs will access only the leaves of the hierarchy, thus 1535 * seeing that no grace period is in progress, at least until the 1536 * corresponding leaf node has been initialized. 1537 * 1538 * The grace period cannot complete until the initialization 1539 * process finishes, because this kthread handles both. 1540 */ 1541 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT); 1542 rcu_for_each_node_breadth_first(rnp) { 1543 rcu_gp_slow(gp_init_delay); 1544 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1545 rdp = this_cpu_ptr(&rcu_data); 1546 rcu_preempt_check_blocked_tasks(rnp); 1547 rnp->qsmask = rnp->qsmaskinit; 1548 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1549 if (rnp == rdp->mynode) 1550 (void)__note_gp_changes(rnp, rdp); 1551 rcu_preempt_boost_start_gp(rnp); 1552 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1553 rnp->level, rnp->grplo, 1554 rnp->grphi, rnp->qsmask); 1555 /* Quiescent states for tasks on any now-offline CPUs. */ 1556 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1557 rnp->rcu_gp_init_mask = mask; 1558 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1559 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1560 else 1561 raw_spin_unlock_irq_rcu_node(rnp); 1562 cond_resched_tasks_rcu_qs(); 1563 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1564 } 1565 1566 // If strict, make all CPUs aware of new grace period. 1567 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 1568 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 1569 1570 return true; 1571 } 1572 1573 /* 1574 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1575 * time. 1576 */ 1577 static bool rcu_gp_fqs_check_wake(int *gfp) 1578 { 1579 struct rcu_node *rnp = rcu_get_root(); 1580 1581 // If under overload conditions, force an immediate FQS scan. 1582 if (*gfp & RCU_GP_FLAG_OVLD) 1583 return true; 1584 1585 // Someone like call_rcu() requested a force-quiescent-state scan. 1586 *gfp = READ_ONCE(rcu_state.gp_flags); 1587 if (*gfp & RCU_GP_FLAG_FQS) 1588 return true; 1589 1590 // The current grace period has completed. 1591 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1592 return true; 1593 1594 return false; 1595 } 1596 1597 /* 1598 * Do one round of quiescent-state forcing. 1599 */ 1600 static void rcu_gp_fqs(bool first_time) 1601 { 1602 struct rcu_node *rnp = rcu_get_root(); 1603 1604 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1605 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1); 1606 if (first_time) { 1607 /* Collect dyntick-idle snapshots. */ 1608 force_qs_rnp(dyntick_save_progress_counter); 1609 } else { 1610 /* Handle dyntick-idle and offline CPUs. */ 1611 force_qs_rnp(rcu_implicit_dynticks_qs); 1612 } 1613 /* Clear flag to prevent immediate re-entry. */ 1614 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 1615 raw_spin_lock_irq_rcu_node(rnp); 1616 WRITE_ONCE(rcu_state.gp_flags, 1617 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); 1618 raw_spin_unlock_irq_rcu_node(rnp); 1619 } 1620 } 1621 1622 /* 1623 * Loop doing repeated quiescent-state forcing until the grace period ends. 
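*
* Roughly: sleep on rcu_state.gp_wq for up to jiffies_till_first_fqs
* (later jiffies_till_next_fqs) jiffies, exit once the root ->qsmask is
* empty and no readers are blocking the grace period, and otherwise invoke
* rcu_gp_fqs() whenever the timeout expires or an FQS/OVLD flag is set.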
1624 */ 1625 static noinline_for_stack void rcu_gp_fqs_loop(void) 1626 { 1627 bool first_gp_fqs = true; 1628 int gf = 0; 1629 unsigned long j; 1630 int ret; 1631 struct rcu_node *rnp = rcu_get_root(); 1632 1633 j = READ_ONCE(jiffies_till_first_fqs); 1634 if (rcu_state.cbovld) 1635 gf = RCU_GP_FLAG_OVLD; 1636 ret = 0; 1637 for (;;) { 1638 if (rcu_state.cbovld) { 1639 j = (j + 2) / 3; 1640 if (j <= 0) 1641 j = 1; 1642 } 1643 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) { 1644 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j); 1645 /* 1646 * jiffies_force_qs before RCU_GP_WAIT_FQS state 1647 * update; required for stall checks. 1648 */ 1649 smp_wmb(); 1650 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1651 jiffies + (j ? 3 * j : 2)); 1652 } 1653 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1654 TPS("fqswait")); 1655 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS); 1656 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq, 1657 rcu_gp_fqs_check_wake(&gf), j); 1658 rcu_gp_torture_wait(); 1659 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS); 1660 /* Locking provides needed memory barriers. */ 1661 /* 1662 * Exit the loop if the root rcu_node structure indicates that the grace period 1663 * has ended, leave the loop. The rcu_preempt_blocked_readers_cgp(rnp) check 1664 * is required only for single-node rcu_node trees because readers blocking 1665 * the current grace period are queued only on leaf rcu_node structures. 1666 * For multi-node trees, checking the root node's ->qsmask suffices, because a 1667 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from 1668 * the corresponding leaf nodes have passed through their quiescent state. 1669 */ 1670 if (!READ_ONCE(rnp->qsmask) && 1671 !rcu_preempt_blocked_readers_cgp(rnp)) 1672 break; 1673 /* If time for quiescent-state forcing, do it. */ 1674 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || 1675 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) { 1676 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1677 TPS("fqsstart")); 1678 rcu_gp_fqs(first_gp_fqs); 1679 gf = 0; 1680 if (first_gp_fqs) { 1681 first_gp_fqs = false; 1682 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; 1683 } 1684 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1685 TPS("fqsend")); 1686 cond_resched_tasks_rcu_qs(); 1687 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1688 ret = 0; /* Force full wait till next FQS. */ 1689 j = READ_ONCE(jiffies_till_next_fqs); 1690 } else { 1691 /* Deal with stray signal. */ 1692 cond_resched_tasks_rcu_qs(); 1693 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1694 WARN_ON(signal_pending(current)); 1695 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1696 TPS("fqswaitsig")); 1697 ret = 1; /* Keep old FQS timing. */ 1698 j = jiffies; 1699 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 1700 j = 1; 1701 else 1702 j = rcu_state.jiffies_force_qs - j; 1703 gf = 0; 1704 } 1705 } 1706 } 1707 1708 /* 1709 * Clean up after the old grace period. 
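*
* This records the grace-period duration, informs the polled API via
* rcu_poll_gp_seq_end(), propagates the ended ->gp_seq value breadth-first
* through the rcu_node tree, marks the state RCU_GP_IDLE, and requests a
* new grace period if callbacks are still waiting for one.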
1710 */ 1711 static noinline void rcu_gp_cleanup(void) 1712 { 1713 int cpu; 1714 bool needgp = false; 1715 unsigned long gp_duration; 1716 unsigned long new_gp_seq; 1717 bool offloaded; 1718 struct rcu_data *rdp; 1719 struct rcu_node *rnp = rcu_get_root(); 1720 struct swait_queue_head *sq; 1721 1722 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1723 raw_spin_lock_irq_rcu_node(rnp); 1724 rcu_state.gp_end = jiffies; 1725 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 1726 if (gp_duration > rcu_state.gp_max) 1727 rcu_state.gp_max = gp_duration; 1728 1729 /* 1730 * We know the grace period is complete, but to everyone else 1731 * it appears to still be ongoing. But it is also the case 1732 * that to everyone else it looks like there is nothing that 1733 * they can do to advance the grace period. It is therefore 1734 * safe for us to drop the lock in order to mark the grace 1735 * period as completed in all of the rcu_node structures. 1736 */ 1737 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap); 1738 raw_spin_unlock_irq_rcu_node(rnp); 1739 1740 /* 1741 * Propagate new ->gp_seq value to rcu_node structures so that 1742 * other CPUs don't have to wait until the start of the next grace 1743 * period to process their callbacks. This also avoids some nasty 1744 * RCU grace-period initialization races by forcing the end of 1745 * the current grace period to be completely recorded in all of 1746 * the rcu_node structures before the beginning of the next grace 1747 * period is recorded in any of the rcu_node structures. 1748 */ 1749 new_gp_seq = rcu_state.gp_seq; 1750 rcu_seq_end(&new_gp_seq); 1751 rcu_for_each_node_breadth_first(rnp) { 1752 raw_spin_lock_irq_rcu_node(rnp); 1753 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 1754 dump_blkd_tasks(rnp, 10); 1755 WARN_ON_ONCE(rnp->qsmask); 1756 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 1757 if (!rnp->parent) 1758 smp_mb(); // Order against failing poll_state_synchronize_rcu_full(). 1759 rdp = this_cpu_ptr(&rcu_data); 1760 if (rnp == rdp->mynode) 1761 needgp = __note_gp_changes(rnp, rdp) || needgp; 1762 /* smp_mb() provided by prior unlock-lock pair. */ 1763 needgp = rcu_future_gp_cleanup(rnp) || needgp; 1764 // Reset overload indication for CPUs no longer overloaded 1765 if (rcu_is_leaf_node(rnp)) 1766 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { 1767 rdp = per_cpu_ptr(&rcu_data, cpu); 1768 check_cb_ovld_locked(rdp, rnp); 1769 } 1770 sq = rcu_nocb_gp_get(rnp); 1771 raw_spin_unlock_irq_rcu_node(rnp); 1772 rcu_nocb_gp_cleanup(sq); 1773 cond_resched_tasks_rcu_qs(); 1774 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1775 rcu_gp_slow(gp_cleanup_delay); 1776 } 1777 rnp = rcu_get_root(); 1778 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 1779 1780 /* Declare grace period done, trace first to use old GP number. */ 1781 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 1782 rcu_seq_end(&rcu_state.gp_seq); 1783 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 1784 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE); 1785 /* Check for GP requests since above loop. */ 1786 rdp = this_cpu_ptr(&rcu_data); 1787 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 1788 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 1789 TPS("CleanupMore")); 1790 needgp = true; 1791 } 1792 /* Advance CBs to reduce false positives below. 
*/ 1793 offloaded = rcu_rdp_is_offloaded(rdp); 1794 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { 1795 1796 // We get here if a grace period was needed (“needgp”) 1797 // and the above call to rcu_accelerate_cbs() did not set 1798 // the RCU_GP_FLAG_INIT bit in ->gp_state (which records 1799 // the need for another grace period). The purpose 1800 // of the “offloaded” check is to avoid invoking 1801 // rcu_accelerate_cbs() on an offloaded CPU because we do not 1802 // hold the ->nocb_lock needed to safely access an offloaded 1803 // ->cblist. We do not want to acquire that lock because 1804 // it can be heavily contended during callback floods. 1805 1806 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 1807 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 1808 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq")); 1809 } else { 1810 1811 // We get here either if there is no need for an 1812 // additional grace period or if rcu_accelerate_cbs() has 1813 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 1814 // So all we need to do is to clear all of the other 1815 // ->gp_flags bits. 1816 1817 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT); 1818 } 1819 raw_spin_unlock_irq_rcu_node(rnp); 1820 1821 // If strict, make all CPUs aware of the end of the old grace period. 1822 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 1823 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 1824 } 1825 1826 /* 1827 * Body of kthread that handles grace periods. 1828 */ 1829 static int __noreturn rcu_gp_kthread(void *unused) 1830 { 1831 rcu_bind_gp_kthread(); 1832 for (;;) { 1833 1834 /* Handle grace-period start. */ 1835 for (;;) { 1836 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1837 TPS("reqwait")); 1838 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS); 1839 swait_event_idle_exclusive(rcu_state.gp_wq, 1840 READ_ONCE(rcu_state.gp_flags) & 1841 RCU_GP_FLAG_INIT); 1842 rcu_gp_torture_wait(); 1843 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS); 1844 /* Locking provides needed memory barrier. */ 1845 if (rcu_gp_init()) 1846 break; 1847 cond_resched_tasks_rcu_qs(); 1848 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1849 WARN_ON(signal_pending(current)); 1850 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1851 TPS("reqwaitsig")); 1852 } 1853 1854 /* Handle quiescent-state forcing. */ 1855 rcu_gp_fqs_loop(); 1856 1857 /* Handle grace-period end. */ 1858 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP); 1859 rcu_gp_cleanup(); 1860 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED); 1861 } 1862 } 1863 1864 /* 1865 * Report a full set of quiescent states to the rcu_state data structure. 1866 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 1867 * another grace period is required. Whether we wake the grace-period 1868 * kthread or it awakens itself for the next round of quiescent-state 1869 * forcing, that kthread will clean up after the just-completed grace 1870 * period. Note that the caller must hold rnp->lock, which is released 1871 * before return. 1872 */ 1873 static void rcu_report_qs_rsp(unsigned long flags) 1874 __releases(rcu_get_root()->lock) 1875 { 1876 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 1877 WARN_ON_ONCE(!rcu_gp_in_progress()); 1878 WRITE_ONCE(rcu_state.gp_flags, 1879 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 1880 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 1881 rcu_gp_kthread_wake(); 1882 } 1883 1884 /* 1885 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 
1886 * Allows quiescent states for a group of CPUs to be reported at one go 1887 * to the specified rcu_node structure, though all the CPUs in the group 1888 * must be represented by the same rcu_node structure (which need not be a 1889 * leaf rcu_node structure, though it often will be). The gps parameter 1890 * is the grace-period snapshot, which means that the quiescent states 1891 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 1892 * must be held upon entry, and it is released before return. 1893 * 1894 * As a special case, if mask is zero, the bit-already-cleared check is 1895 * disabled. This allows propagating quiescent state due to resumed tasks 1896 * during grace-period initialization. 1897 */ 1898 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 1899 unsigned long gps, unsigned long flags) 1900 __releases(rnp->lock) 1901 { 1902 unsigned long oldmask = 0; 1903 struct rcu_node *rnp_c; 1904 1905 raw_lockdep_assert_held_rcu_node(rnp); 1906 1907 /* Walk up the rcu_node hierarchy. */ 1908 for (;;) { 1909 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 1910 1911 /* 1912 * Our bit has already been cleared, or the 1913 * relevant grace period is already over, so done. 1914 */ 1915 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1916 return; 1917 } 1918 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 1919 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 1920 rcu_preempt_blocked_readers_cgp(rnp)); 1921 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 1922 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 1923 mask, rnp->qsmask, rnp->level, 1924 rnp->grplo, rnp->grphi, 1925 !!rnp->gp_tasks); 1926 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 1927 1928 /* Other bits still set at this level, so done. */ 1929 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1930 return; 1931 } 1932 rnp->completedqs = rnp->gp_seq; 1933 mask = rnp->grpmask; 1934 if (rnp->parent == NULL) { 1935 1936 /* No more levels. Exit loop holding root lock. */ 1937 1938 break; 1939 } 1940 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1941 rnp_c = rnp; 1942 rnp = rnp->parent; 1943 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1944 oldmask = READ_ONCE(rnp_c->qsmask); 1945 } 1946 1947 /* 1948 * Get here if we are the last CPU to pass through a quiescent 1949 * state for this grace period. Invoke rcu_report_qs_rsp() 1950 * to clean up and start the next grace period if one is needed. 1951 */ 1952 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 1953 } 1954 1955 /* 1956 * Record a quiescent state for all tasks that were previously queued 1957 * on the specified rcu_node structure and that were blocking the current 1958 * RCU grace period. The caller must hold the corresponding rnp->lock with 1959 * irqs disabled, and this lock is released upon return, but irqs remain 1960 * disabled. 1961 */ 1962 static void __maybe_unused 1963 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 1964 __releases(rnp->lock) 1965 { 1966 unsigned long gps; 1967 unsigned long mask; 1968 struct rcu_node *rnp_p; 1969 1970 raw_lockdep_assert_held_rcu_node(rnp); 1971 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 1972 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 1973 rnp->qsmask != 0) { 1974 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1975 return; /* Still need more quiescent states! 
*/ 1976 } 1977 1978 rnp->completedqs = rnp->gp_seq; 1979 rnp_p = rnp->parent; 1980 if (rnp_p == NULL) { 1981 /* 1982 * Only one rcu_node structure in the tree, so don't 1983 * try to report up to its nonexistent parent! 1984 */ 1985 rcu_report_qs_rsp(flags); 1986 return; 1987 } 1988 1989 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 1990 gps = rnp->gp_seq; 1991 mask = rnp->grpmask; 1992 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1993 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 1994 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 1995 } 1996 1997 /* 1998 * Record a quiescent state for the specified CPU to that CPU's rcu_data 1999 * structure. This must be called from the specified CPU. 2000 */ 2001 static void 2002 rcu_report_qs_rdp(struct rcu_data *rdp) 2003 { 2004 unsigned long flags; 2005 unsigned long mask; 2006 bool needwake = false; 2007 bool needacc = false; 2008 struct rcu_node *rnp; 2009 2010 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); 2011 rnp = rdp->mynode; 2012 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2013 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2014 rdp->gpwrap) { 2015 2016 /* 2017 * The grace period in which this quiescent state was 2018 * recorded has ended, so don't report it upwards. 2019 * We will instead need a new quiescent state that lies 2020 * within the current grace period. 2021 */ 2022 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2023 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2024 return; 2025 } 2026 mask = rdp->grpmask; 2027 rdp->core_needs_qs = false; 2028 if ((rnp->qsmask & mask) == 0) { 2029 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2030 } else { 2031 /* 2032 * This GP can't end until cpu checks in, so all of our 2033 * callbacks can be processed during the next GP. 2034 * 2035 * NOCB kthreads have their own way to deal with that... 2036 */ 2037 if (!rcu_rdp_is_offloaded(rdp)) { 2038 needwake = rcu_accelerate_cbs(rnp, rdp); 2039 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) { 2040 /* 2041 * ...but NOCB kthreads may miss or delay callbacks acceleration 2042 * if in the middle of a (de-)offloading process. 2043 */ 2044 needacc = true; 2045 } 2046 2047 rcu_disable_urgency_upon_qs(rdp); 2048 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2049 /* ^^^ Released rnp->lock */ 2050 if (needwake) 2051 rcu_gp_kthread_wake(); 2052 2053 if (needacc) { 2054 rcu_nocb_lock_irqsave(rdp, flags); 2055 rcu_accelerate_cbs_unlocked(rnp, rdp); 2056 rcu_nocb_unlock_irqrestore(rdp, flags); 2057 } 2058 } 2059 } 2060 2061 /* 2062 * Check to see if there is a new grace period of which this CPU 2063 * is not yet aware, and if so, set up local rcu_data state for it. 2064 * Otherwise, see if this CPU has just passed through its first 2065 * quiescent state for this grace period, and record that fact if so. 2066 */ 2067 static void 2068 rcu_check_quiescent_state(struct rcu_data *rdp) 2069 { 2070 /* Check for grace-period ends and beginnings. */ 2071 note_gp_changes(rdp); 2072 2073 /* 2074 * Does this CPU still need to do its part for current grace period? 2075 * If no, return and let the other CPUs do their part as well. 2076 */ 2077 if (!rdp->core_needs_qs) 2078 return; 2079 2080 /* 2081 * Was there a quiescent state since the beginning of the grace 2082 * period? If no, then exit and wait for the next call. 2083 */ 2084 if (rdp->cpu_no_qs.b.norm) 2085 return; 2086 2087 /* 2088 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2089 * judge of that). 
2090 */ 2091 rcu_report_qs_rdp(rdp); 2092 } 2093 2094 /* 2095 * Near the end of the offline process. Trace the fact that this CPU 2096 * is going offline. 2097 */ 2098 int rcutree_dying_cpu(unsigned int cpu) 2099 { 2100 bool blkd; 2101 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2102 struct rcu_node *rnp = rdp->mynode; 2103 2104 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2105 return 0; 2106 2107 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); 2108 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 2109 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl")); 2110 return 0; 2111 } 2112 2113 /* 2114 * All CPUs for the specified rcu_node structure have gone offline, 2115 * and all tasks that were preempted within an RCU read-side critical 2116 * section while running on one of those CPUs have since exited their RCU 2117 * read-side critical section. Some other CPU is reporting this fact with 2118 * the specified rcu_node structure's ->lock held and interrupts disabled. 2119 * This function therefore goes up the tree of rcu_node structures, 2120 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2121 * the leaf rcu_node structure's ->qsmaskinit field has already been 2122 * updated. 2123 * 2124 * This function does check that the specified rcu_node structure has 2125 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2126 * prematurely. That said, invoking it after the fact will cost you 2127 * a needless lock acquisition. So once it has done its work, don't 2128 * invoke it again. 2129 */ 2130 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2131 { 2132 long mask; 2133 struct rcu_node *rnp = rnp_leaf; 2134 2135 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2136 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2137 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 2138 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 2139 return; 2140 for (;;) { 2141 mask = rnp->grpmask; 2142 rnp = rnp->parent; 2143 if (!rnp) 2144 break; 2145 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2146 rnp->qsmaskinit &= ~mask; 2147 /* Between grace periods, so better already be zero! */ 2148 WARN_ON_ONCE(rnp->qsmask); 2149 if (rnp->qsmaskinit) { 2150 raw_spin_unlock_rcu_node(rnp); 2151 /* irqs remain disabled. */ 2152 return; 2153 } 2154 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2155 } 2156 } 2157 2158 /* 2159 * The CPU has been completely removed, and some other CPU is reporting 2160 * this fact from process context. Do the remainder of the cleanup. 2161 * There can only be one CPU hotplug operation at a time, so no need for 2162 * explicit locking. 2163 */ 2164 int rcutree_dead_cpu(unsigned int cpu) 2165 { 2166 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2167 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2168 2169 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2170 return 0; 2171 2172 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); 2173 /* Adjust any no-longer-needed kthreads. */ 2174 rcu_boost_kthread_setaffinity(rnp, -1); 2175 // Stop-machine done, so allow nohz_full to disable tick. 2176 tick_dep_clear(TICK_DEP_BIT_RCU); 2177 return 0; 2178 } 2179 2180 /* 2181 * Invoke any RCU callbacks that have made it to the end of their grace 2182 * period. Throttle as specified by rdp->blimit. 
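*
* The effective batch limit is max(rdp->blimit, pending >> rcu_divisor),
* and large batches run from softirq context are additionally bounded in
* time by roughly rcu_resched_ns of local_clock() progress.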
2183 */ 2184 static void rcu_do_batch(struct rcu_data *rdp) 2185 { 2186 int div; 2187 bool __maybe_unused empty; 2188 unsigned long flags; 2189 struct rcu_head *rhp; 2190 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2191 long bl, count = 0; 2192 long pending, tlimit = 0; 2193 2194 /* If no callbacks are ready, just return. */ 2195 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2196 trace_rcu_batch_start(rcu_state.name, 2197 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2198 trace_rcu_batch_end(rcu_state.name, 0, 2199 !rcu_segcblist_empty(&rdp->cblist), 2200 need_resched(), is_idle_task(current), 2201 rcu_is_callbacks_kthread(rdp)); 2202 return; 2203 } 2204 2205 /* 2206 * Extract the list of ready callbacks, disabling IRQs to prevent 2207 * races with call_rcu() from interrupt handlers. Leave the 2208 * callback counts, as rcu_barrier() needs to be conservative. 2209 */ 2210 rcu_nocb_lock_irqsave(rdp, flags); 2211 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2212 pending = rcu_segcblist_n_cbs(&rdp->cblist); 2213 div = READ_ONCE(rcu_divisor); 2214 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; 2215 bl = max(rdp->blimit, pending >> div); 2216 if (in_serving_softirq() && unlikely(bl > 100)) { 2217 long rrn = READ_ONCE(rcu_resched_ns); 2218 2219 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn; 2220 tlimit = local_clock() + rrn; 2221 } 2222 trace_rcu_batch_start(rcu_state.name, 2223 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2224 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2225 if (rcu_rdp_is_offloaded(rdp)) 2226 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2227 2228 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); 2229 rcu_nocb_unlock_irqrestore(rdp, flags); 2230 2231 /* Invoke callbacks. */ 2232 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2233 rhp = rcu_cblist_dequeue(&rcl); 2234 2235 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2236 rcu_callback_t f; 2237 2238 count++; 2239 debug_rcu_head_unqueue(rhp); 2240 2241 rcu_lock_acquire(&rcu_callback_map); 2242 trace_rcu_invoke_callback(rcu_state.name, rhp); 2243 2244 f = rhp->func; 2245 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); 2246 f(rhp); 2247 2248 rcu_lock_release(&rcu_callback_map); 2249 2250 /* 2251 * Stop only if limit reached and CPU has something to do. 2252 */ 2253 if (in_serving_softirq()) { 2254 if (count >= bl && (need_resched() || !is_idle_task(current))) 2255 break; 2256 /* 2257 * Make sure we don't spend too much time here and deprive other 2258 * softirq vectors of CPU cycles. 2259 */ 2260 if (unlikely(tlimit)) { 2261 /* only call local_clock() every 32 callbacks */ 2262 if (likely((count & 31) || local_clock() < tlimit)) 2263 continue; 2264 /* Exceeded the time limit, so leave. */ 2265 break; 2266 } 2267 } else { 2268 local_bh_enable(); 2269 lockdep_assert_irqs_enabled(); 2270 cond_resched_tasks_rcu_qs(); 2271 lockdep_assert_irqs_enabled(); 2272 local_bh_disable(); 2273 } 2274 } 2275 2276 rcu_nocb_lock_irqsave(rdp, flags); 2277 rdp->n_cbs_invoked += count; 2278 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2279 is_idle_task(current), rcu_is_callbacks_kthread(rdp)); 2280 2281 /* Update counts and requeue any remaining callbacks. */ 2282 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2283 rcu_segcblist_add_len(&rdp->cblist, -count); 2284 2285 /* Reinstate batch limit if we have worked down the excess. 
*/ 2286 count = rcu_segcblist_n_cbs(&rdp->cblist); 2287 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2288 rdp->blimit = blimit; 2289 2290 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2291 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2292 rdp->qlen_last_fqs_check = 0; 2293 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 2294 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2295 rdp->qlen_last_fqs_check = count; 2296 2297 /* 2298 * The following usually indicates a double call_rcu(). To track 2299 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2300 */ 2301 empty = rcu_segcblist_empty(&rdp->cblist); 2302 WARN_ON_ONCE(count == 0 && !empty); 2303 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2304 count != 0 && empty); 2305 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); 2306 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); 2307 2308 rcu_nocb_unlock_irqrestore(rdp, flags); 2309 2310 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2311 } 2312 2313 /* 2314 * This function is invoked from each scheduling-clock interrupt, 2315 * and checks to see if this CPU is in a non-context-switch quiescent 2316 * state, for example, user mode or idle loop. It also schedules RCU 2317 * core processing. If the current grace period has gone on too long, 2318 * it will ask the scheduler to manufacture a context switch for the sole 2319 * purpose of providing the needed quiescent state. 2320 */ 2321 void rcu_sched_clock_irq(int user) 2322 { 2323 unsigned long j; 2324 2325 if (IS_ENABLED(CONFIG_PROVE_RCU)) { 2326 j = jiffies; 2327 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock))); 2328 __this_cpu_write(rcu_data.last_sched_clock, j); 2329 } 2330 trace_rcu_utilization(TPS("Start scheduler-tick")); 2331 lockdep_assert_irqs_disabled(); 2332 raw_cpu_inc(rcu_data.ticks_this_gp); 2333 /* The load-acquire pairs with the store-release setting to true. */ 2334 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2335 /* Idle and userspace execution already are quiescent states. */ 2336 if (!rcu_is_cpu_rrupt_from_idle() && !user) { 2337 set_tsk_need_resched(current); 2338 set_preempt_need_resched(); 2339 } 2340 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2341 } 2342 rcu_flavor_sched_clock_irq(user); 2343 if (rcu_pending(user)) 2344 invoke_rcu_core(); 2345 if (user || rcu_is_cpu_rrupt_from_idle()) 2346 rcu_note_voluntary_context_switch(current); 2347 lockdep_assert_irqs_disabled(); 2348 2349 trace_rcu_utilization(TPS("End scheduler-tick")); 2350 } 2351 2352 /* 2353 * Scan the leaf rcu_node structures. For each structure on which all 2354 * CPUs have reported a quiescent state and on which there are tasks 2355 * blocking the current grace period, initiate RCU priority boosting. 2356 * Otherwise, invoke the specified function to check dyntick state for 2357 * each CPU that has not yet reported a quiescent state. 
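*
* The function passed in is dyntick_save_progress_counter for the first
* scan of a given grace period and rcu_implicit_dynticks_qs for later
* scans, as selected by rcu_gp_fqs() above.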
2358 */ 2359 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2360 { 2361 int cpu; 2362 unsigned long flags; 2363 unsigned long mask; 2364 struct rcu_data *rdp; 2365 struct rcu_node *rnp; 2366 2367 rcu_state.cbovld = rcu_state.cbovldnext; 2368 rcu_state.cbovldnext = false; 2369 rcu_for_each_leaf_node(rnp) { 2370 cond_resched_tasks_rcu_qs(); 2371 mask = 0; 2372 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2373 rcu_state.cbovldnext |= !!rnp->cbovldmask; 2374 if (rnp->qsmask == 0) { 2375 if (rcu_preempt_blocked_readers_cgp(rnp)) { 2376 /* 2377 * No point in scanning bits because they 2378 * are all zero. But we might need to 2379 * priority-boost blocked readers. 2380 */ 2381 rcu_initiate_boost(rnp, flags); 2382 /* rcu_initiate_boost() releases rnp->lock */ 2383 continue; 2384 } 2385 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2386 continue; 2387 } 2388 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { 2389 rdp = per_cpu_ptr(&rcu_data, cpu); 2390 if (f(rdp)) { 2391 mask |= rdp->grpmask; 2392 rcu_disable_urgency_upon_qs(rdp); 2393 } 2394 } 2395 if (mask != 0) { 2396 /* Idle/offline CPUs, report (releases rnp->lock). */ 2397 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2398 } else { 2399 /* Nothing to do here, so just drop the lock. */ 2400 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2401 } 2402 } 2403 } 2404 2405 /* 2406 * Force quiescent states on reluctant CPUs, and also detect which 2407 * CPUs are in dyntick-idle mode. 2408 */ 2409 void rcu_force_quiescent_state(void) 2410 { 2411 unsigned long flags; 2412 bool ret; 2413 struct rcu_node *rnp; 2414 struct rcu_node *rnp_old = NULL; 2415 2416 /* Funnel through hierarchy to reduce memory contention. */ 2417 rnp = raw_cpu_read(rcu_data.mynode); 2418 for (; rnp != NULL; rnp = rnp->parent) { 2419 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2420 !raw_spin_trylock(&rnp->fqslock); 2421 if (rnp_old != NULL) 2422 raw_spin_unlock(&rnp_old->fqslock); 2423 if (ret) 2424 return; 2425 rnp_old = rnp; 2426 } 2427 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2428 2429 /* Reached the root of the rcu_node tree, acquire lock. */ 2430 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2431 raw_spin_unlock(&rnp_old->fqslock); 2432 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2433 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2434 return; /* Someone beat us to it. */ 2435 } 2436 WRITE_ONCE(rcu_state.gp_flags, 2437 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2438 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2439 rcu_gp_kthread_wake(); 2440 } 2441 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2442 2443 // Workqueue handler for an RCU reader for kernels enforcing struct RCU 2444 // grace periods. 2445 static void strict_work_handler(struct work_struct *work) 2446 { 2447 rcu_read_lock(); 2448 rcu_read_unlock(); 2449 } 2450 2451 /* Perform RCU core processing work for the current CPU. */ 2452 static __latent_entropy void rcu_core(void) 2453 { 2454 unsigned long flags; 2455 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2456 struct rcu_node *rnp = rdp->mynode; 2457 /* 2458 * On RT rcu_core() can be preempted when IRQs aren't disabled. 2459 * Therefore this function can race with concurrent NOCB (de-)offloading 2460 * on this CPU and the below condition must be considered volatile. 2461 * However if we race with: 2462 * 2463 * _ Offloading: In the worst case we accelerate or process callbacks 2464 * concurrently with NOCB kthreads. We are guaranteed to 2465 * call rcu_nocb_lock() if that happens. 
2466 * 2467 * _ Deoffloading: In the worst case we miss callbacks acceleration or 2468 * processing. This is fine because the early stage 2469 * of deoffloading invokes rcu_core() after setting 2470 * SEGCBLIST_RCU_CORE. So we guarantee that we'll process 2471 * what could have been dismissed without the need to wait 2472 * for the next rcu_pending() check in the next jiffy. 2473 */ 2474 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); 2475 2476 if (cpu_is_offline(smp_processor_id())) 2477 return; 2478 trace_rcu_utilization(TPS("Start RCU core")); 2479 WARN_ON_ONCE(!rdp->beenonline); 2480 2481 /* Report any deferred quiescent states if preemption enabled. */ 2482 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) { 2483 rcu_preempt_deferred_qs(current); 2484 } else if (rcu_preempt_need_deferred_qs(current)) { 2485 set_tsk_need_resched(current); 2486 set_preempt_need_resched(); 2487 } 2488 2489 /* Update RCU state based on any recent quiescent states. */ 2490 rcu_check_quiescent_state(rdp); 2491 2492 /* No grace period and unregistered callbacks? */ 2493 if (!rcu_gp_in_progress() && 2494 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) { 2495 rcu_nocb_lock_irqsave(rdp, flags); 2496 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2497 rcu_accelerate_cbs_unlocked(rnp, rdp); 2498 rcu_nocb_unlock_irqrestore(rdp, flags); 2499 } 2500 2501 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2502 2503 /* If there are callbacks ready, invoke them. */ 2504 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && 2505 likely(READ_ONCE(rcu_scheduler_fully_active))) { 2506 rcu_do_batch(rdp); 2507 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2508 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2509 invoke_rcu_core(); 2510 } 2511 2512 /* Do any needed deferred wakeups of rcuo kthreads. */ 2513 do_nocb_deferred_wakeup(rdp); 2514 trace_rcu_utilization(TPS("End RCU core")); 2515 2516 // If strict GPs, schedule an RCU reader in a clean environment. 2517 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2518 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); 2519 } 2520 2521 static void rcu_core_si(struct softirq_action *h) 2522 { 2523 rcu_core(); 2524 } 2525 2526 static void rcu_wake_cond(struct task_struct *t, int status) 2527 { 2528 /* 2529 * If the thread is yielding, only wake it when this 2530 * is invoked from idle 2531 */ 2532 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2533 wake_up_process(t); 2534 } 2535 2536 static void invoke_rcu_core_kthread(void) 2537 { 2538 struct task_struct *t; 2539 unsigned long flags; 2540 2541 local_irq_save(flags); 2542 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2543 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2544 if (t != NULL && t != current) 2545 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2546 local_irq_restore(flags); 2547 } 2548 2549 /* 2550 * Wake up this CPU's rcuc kthread to do RCU core processing. 
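*
* When use_softirq is set, this is done by raising RCU_SOFTIRQ, whose
* handler rcu_core_si() runs rcu_core(); otherwise the per-CPU rcuc
* kthread is awakened via invoke_rcu_core_kthread().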
2551 */ 2552 static void invoke_rcu_core(void) 2553 { 2554 if (!cpu_online(smp_processor_id())) 2555 return; 2556 if (use_softirq) 2557 raise_softirq(RCU_SOFTIRQ); 2558 else 2559 invoke_rcu_core_kthread(); 2560 } 2561 2562 static void rcu_cpu_kthread_park(unsigned int cpu) 2563 { 2564 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2565 } 2566 2567 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2568 { 2569 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2570 } 2571 2572 /* 2573 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2574 * the RCU softirq used in configurations of RCU that do not support RCU 2575 * priority boosting. 2576 */ 2577 static void rcu_cpu_kthread(unsigned int cpu) 2578 { 2579 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2580 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2581 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity); 2582 int spincnt; 2583 2584 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2585 for (spincnt = 0; spincnt < 10; spincnt++) { 2586 WRITE_ONCE(*j, jiffies); 2587 local_bh_disable(); 2588 *statusp = RCU_KTHREAD_RUNNING; 2589 local_irq_disable(); 2590 work = *workp; 2591 *workp = 0; 2592 local_irq_enable(); 2593 if (work) 2594 rcu_core(); 2595 local_bh_enable(); 2596 if (*workp == 0) { 2597 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2598 *statusp = RCU_KTHREAD_WAITING; 2599 return; 2600 } 2601 } 2602 *statusp = RCU_KTHREAD_YIELDING; 2603 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2604 schedule_timeout_idle(2); 2605 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2606 *statusp = RCU_KTHREAD_WAITING; 2607 WRITE_ONCE(*j, jiffies); 2608 } 2609 2610 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2611 .store = &rcu_data.rcu_cpu_kthread_task, 2612 .thread_should_run = rcu_cpu_kthread_should_run, 2613 .thread_fn = rcu_cpu_kthread, 2614 .thread_comm = "rcuc/%u", 2615 .setup = rcu_cpu_kthread_setup, 2616 .park = rcu_cpu_kthread_park, 2617 }; 2618 2619 /* 2620 * Spawn per-CPU RCU core processing kthreads. 2621 */ 2622 static int __init rcu_spawn_core_kthreads(void) 2623 { 2624 int cpu; 2625 2626 for_each_possible_cpu(cpu) 2627 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2628 if (use_softirq) 2629 return 0; 2630 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2631 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2632 return 0; 2633 } 2634 2635 /* 2636 * Handle any core-RCU processing required by a call_rcu() invocation. 2637 */ 2638 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2639 unsigned long flags) 2640 { 2641 /* 2642 * If called from an extended quiescent state, invoke the RCU 2643 * core in order to force a re-evaluation of RCU's idleness. 2644 */ 2645 if (!rcu_is_watching()) 2646 invoke_rcu_core(); 2647 2648 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2649 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2650 return; 2651 2652 /* 2653 * Force the grace period if too many callbacks or too long waiting. 2654 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 2655 * if some other CPU has recently done so. Also, don't bother 2656 * invoking rcu_force_quiescent_state() if the newly enqueued callback 2657 * is the only one waiting for a grace period to complete. 
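*
* "Too many" here means that the callback count has grown by more than
* qhimark since ->qlen_last_fqs_check was last updated, as checked below.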
2658 */ 2659 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2660 rdp->qlen_last_fqs_check + qhimark)) { 2661 2662 /* Are we ignoring a completed grace period? */ 2663 note_gp_changes(rdp); 2664 2665 /* Start a new grace period if one not already started. */ 2666 if (!rcu_gp_in_progress()) { 2667 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 2668 } else { 2669 /* Give the grace period a kick. */ 2670 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 2671 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && 2672 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2673 rcu_force_quiescent_state(); 2674 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 2675 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2676 } 2677 } 2678 } 2679 2680 /* 2681 * RCU callback function to leak a callback. 2682 */ 2683 static void rcu_leak_callback(struct rcu_head *rhp) 2684 { 2685 } 2686 2687 /* 2688 * Check and if necessary update the leaf rcu_node structure's 2689 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2690 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 2691 * structure's ->lock. 2692 */ 2693 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 2694 { 2695 raw_lockdep_assert_held_rcu_node(rnp); 2696 if (qovld_calc <= 0) 2697 return; // Early boot and wildcard value set. 2698 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 2699 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 2700 else 2701 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 2702 } 2703 2704 /* 2705 * Check and if necessary update the leaf rcu_node structure's 2706 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2707 * number of queued RCU callbacks. No locks need be held, but the 2708 * caller must have disabled interrupts. 2709 * 2710 * Note that this function ignores the possibility that there are a lot 2711 * of callbacks all of which have already seen the end of their respective 2712 * grace periods. This omission is due to the need for no-CBs CPUs to 2713 * be holding ->nocb_lock to do this check, which is too heavy for a 2714 * common-case operation. 2715 */ 2716 static void check_cb_ovld(struct rcu_data *rdp) 2717 { 2718 struct rcu_node *const rnp = rdp->mynode; 2719 2720 if (qovld_calc <= 0 || 2721 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 2722 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 2723 return; // Early boot wildcard value or already set correctly. 2724 raw_spin_lock_rcu_node(rnp); 2725 check_cb_ovld_locked(rdp, rnp); 2726 raw_spin_unlock_rcu_node(rnp); 2727 } 2728 2729 static void 2730 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy) 2731 { 2732 static atomic_t doublefrees; 2733 unsigned long flags; 2734 struct rcu_data *rdp; 2735 bool was_alldone; 2736 2737 /* Misaligned rcu_head! */ 2738 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 2739 2740 if (debug_rcu_head_queue(head)) { 2741 /* 2742 * Probable double call_rcu(), so leak the callback. 2743 * Use rcu:rcu_callback trace event to find the previous 2744 * time callback was passed to call_rcu(). 2745 */ 2746 if (atomic_inc_return(&doublefrees) < 4) { 2747 pr_err("%s(): Double-freed CB %p->%pS()!!! 
", __func__, head, head->func); 2748 mem_dump_obj(head); 2749 } 2750 WRITE_ONCE(head->func, rcu_leak_callback); 2751 return; 2752 } 2753 head->func = func; 2754 head->next = NULL; 2755 kasan_record_aux_stack_noalloc(head); 2756 local_irq_save(flags); 2757 rdp = this_cpu_ptr(&rcu_data); 2758 2759 /* Add the callback to our list. */ 2760 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 2761 // This can trigger due to call_rcu() from offline CPU: 2762 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 2763 WARN_ON_ONCE(!rcu_is_watching()); 2764 // Very early boot, before rcu_init(). Initialize if needed 2765 // and then drop through to queue the callback. 2766 if (rcu_segcblist_empty(&rdp->cblist)) 2767 rcu_segcblist_init(&rdp->cblist); 2768 } 2769 2770 check_cb_ovld(rdp); 2771 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) 2772 return; // Enqueued onto ->nocb_bypass, so just leave. 2773 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. 2774 rcu_segcblist_enqueue(&rdp->cblist, head); 2775 if (__is_kvfree_rcu_offset((unsigned long)func)) 2776 trace_rcu_kvfree_callback(rcu_state.name, head, 2777 (unsigned long)func, 2778 rcu_segcblist_n_cbs(&rdp->cblist)); 2779 else 2780 trace_rcu_callback(rcu_state.name, head, 2781 rcu_segcblist_n_cbs(&rdp->cblist)); 2782 2783 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); 2784 2785 /* Go handle any RCU core processing required. */ 2786 if (unlikely(rcu_rdp_is_offloaded(rdp))) { 2787 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ 2788 } else { 2789 __call_rcu_core(rdp, head, flags); 2790 local_irq_restore(flags); 2791 } 2792 } 2793 2794 #ifdef CONFIG_RCU_LAZY 2795 /** 2796 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and 2797 * flush all lazy callbacks (including the new one) to the main ->cblist while 2798 * doing so. 2799 * 2800 * @head: structure to be used for queueing the RCU updates. 2801 * @func: actual callback function to be invoked after the grace period 2802 * 2803 * The callback function will be invoked some time after a full grace 2804 * period elapses, in other words after all pre-existing RCU read-side 2805 * critical sections have completed. 2806 * 2807 * Use this API instead of call_rcu() if you don't want the callback to be 2808 * invoked after very long periods of time, which can happen on systems without 2809 * memory pressure and on systems which are lightly loaded or mostly idle. 2810 * This function will cause callbacks to be invoked sooner than later at the 2811 * expense of extra power. Other than that, this function is identical to, and 2812 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory 2813 * ordering and other functionality. 2814 */ 2815 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) 2816 { 2817 return __call_rcu_common(head, func, false); 2818 } 2819 EXPORT_SYMBOL_GPL(call_rcu_hurry); 2820 #endif 2821 2822 /** 2823 * call_rcu() - Queue an RCU callback for invocation after a grace period. 2824 * By default the callbacks are 'lazy' and are kept hidden from the main 2825 * ->cblist to prevent starting of grace periods too soon. 2826 * If you desire grace periods to start very soon, use call_rcu_hurry(). 2827 * 2828 * @head: structure to be used for queueing the RCU updates. 
2829 * @func: actual callback function to be invoked after the grace period 2830 * 2831 * The callback function will be invoked some time after a full grace 2832 * period elapses, in other words after all pre-existing RCU read-side 2833 * critical sections have completed. However, the callback function 2834 * might well execute concurrently with RCU read-side critical sections 2835 * that started after call_rcu() was invoked. 2836 * 2837 * RCU read-side critical sections are delimited by rcu_read_lock() 2838 * and rcu_read_unlock(), and may be nested. In addition, but only in 2839 * v5.0 and later, regions of code across which interrupts, preemption, 2840 * or softirqs have been disabled also serve as RCU read-side critical 2841 * sections. This includes hardware interrupt handlers, softirq handlers, 2842 * and NMI handlers. 2843 * 2844 * Note that all CPUs must agree that the grace period extended beyond 2845 * all pre-existing RCU read-side critical section. On systems with more 2846 * than one CPU, this means that when "func()" is invoked, each CPU is 2847 * guaranteed to have executed a full memory barrier since the end of its 2848 * last RCU read-side critical section whose beginning preceded the call 2849 * to call_rcu(). It also means that each CPU executing an RCU read-side 2850 * critical section that continues beyond the start of "func()" must have 2851 * executed a memory barrier after the call_rcu() but before the beginning 2852 * of that RCU read-side critical section. Note that these guarantees 2853 * include CPUs that are offline, idle, or executing in user mode, as 2854 * well as CPUs that are executing in the kernel. 2855 * 2856 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 2857 * resulting RCU callback function "func()", then both CPU A and CPU B are 2858 * guaranteed to execute a full memory barrier during the time interval 2859 * between the call to call_rcu() and the invocation of "func()" -- even 2860 * if CPU A and CPU B are the same CPU (but again only if the system has 2861 * more than one CPU). 2862 * 2863 * Implementation of these memory-ordering guarantees is described here: 2864 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 2865 */ 2866 void call_rcu(struct rcu_head *head, rcu_callback_t func) 2867 { 2868 return __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY)); 2869 } 2870 EXPORT_SYMBOL_GPL(call_rcu); 2871 2872 /* Maximum number of jiffies to wait before draining a batch. */ 2873 #define KFREE_DRAIN_JIFFIES (5 * HZ) 2874 #define KFREE_N_BATCHES 2 2875 #define FREE_N_CHANNELS 2 2876 2877 /** 2878 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers 2879 * @nr_records: Number of active pointers in the array 2880 * @next: Next bulk object in the block chain 2881 * @records: Array of the kvfree_rcu() pointers 2882 */ 2883 struct kvfree_rcu_bulk_data { 2884 unsigned long nr_records; 2885 struct kvfree_rcu_bulk_data *next; 2886 void *records[]; 2887 }; 2888 2889 /* 2890 * This macro defines how many entries the "records" array 2891 * will contain. It is based on the fact that the size of 2892 * kvfree_rcu_bulk_data structure becomes exactly one page. 
2893 */ 2894 #define KVFREE_BULK_MAX_ENTR \ 2895 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *)) 2896 2897 /** 2898 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests 2899 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period 2900 * @head_free: List of kfree_rcu() objects waiting for a grace period 2901 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period 2902 * @krcp: Pointer to @kfree_rcu_cpu structure 2903 */ 2904 2905 struct kfree_rcu_cpu_work { 2906 struct rcu_work rcu_work; 2907 struct rcu_head *head_free; 2908 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS]; 2909 struct kfree_rcu_cpu *krcp; 2910 }; 2911 2912 /** 2913 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period 2914 * @head: List of kfree_rcu() objects not yet waiting for a grace period 2915 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period 2916 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period 2917 * @lock: Synchronize access to this structure 2918 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES 2919 * @initialized: The @rcu_work fields have been initialized 2920 * @count: Number of objects for which GP not started 2921 * @bkvcache: 2922 * A simple cache list that contains objects for reuse purpose. 2923 * In order to save some per-cpu space the list is singular. 2924 * Even though it is lockless an access has to be protected by the 2925 * per-cpu lock. 2926 * @page_cache_work: A work to refill the cache when it is empty 2927 * @backoff_page_cache_fill: Delay cache refills 2928 * @work_in_progress: Indicates that page_cache_work is running 2929 * @hrtimer: A hrtimer for scheduling a page_cache_work 2930 * @nr_bkv_objs: number of allocated objects at @bkvcache. 2931 * 2932 * This is a per-CPU structure. The reason that it is not included in 2933 * the rcu_data structure is to permit this code to be extracted from 2934 * the RCU files. Such extraction could allow further optimization of 2935 * the interactions with the slab allocators. 2936 */ 2937 struct kfree_rcu_cpu { 2938 struct rcu_head *head; 2939 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS]; 2940 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES]; 2941 raw_spinlock_t lock; 2942 struct delayed_work monitor_work; 2943 bool initialized; 2944 int count; 2945 2946 struct delayed_work page_cache_work; 2947 atomic_t backoff_page_cache_fill; 2948 atomic_t work_in_progress; 2949 struct hrtimer hrtimer; 2950 2951 struct llist_head bkvcache; 2952 int nr_bkv_objs; 2953 }; 2954 2955 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = { 2956 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock), 2957 }; 2958 2959 static __always_inline void 2960 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead) 2961 { 2962 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2963 int i; 2964 2965 for (i = 0; i < bhead->nr_records; i++) 2966 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i])); 2967 #endif 2968 } 2969 2970 static inline struct kfree_rcu_cpu * 2971 krc_this_cpu_lock(unsigned long *flags) 2972 { 2973 struct kfree_rcu_cpu *krcp; 2974 2975 local_irq_save(*flags); // For safely calling this_cpu_ptr(). 
2976 krcp = this_cpu_ptr(&krc); 2977 raw_spin_lock(&krcp->lock); 2978 2979 return krcp; 2980 } 2981 2982 static inline void 2983 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) 2984 { 2985 raw_spin_unlock_irqrestore(&krcp->lock, flags); 2986 } 2987 2988 static inline struct kvfree_rcu_bulk_data * 2989 get_cached_bnode(struct kfree_rcu_cpu *krcp) 2990 { 2991 if (!krcp->nr_bkv_objs) 2992 return NULL; 2993 2994 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1); 2995 return (struct kvfree_rcu_bulk_data *) 2996 llist_del_first(&krcp->bkvcache); 2997 } 2998 2999 static inline bool 3000 put_cached_bnode(struct kfree_rcu_cpu *krcp, 3001 struct kvfree_rcu_bulk_data *bnode) 3002 { 3003 // Check the limit. 3004 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) 3005 return false; 3006 3007 llist_add((struct llist_node *) bnode, &krcp->bkvcache); 3008 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1); 3009 return true; 3010 } 3011 3012 static int 3013 drain_page_cache(struct kfree_rcu_cpu *krcp) 3014 { 3015 unsigned long flags; 3016 struct llist_node *page_list, *pos, *n; 3017 int freed = 0; 3018 3019 raw_spin_lock_irqsave(&krcp->lock, flags); 3020 page_list = llist_del_all(&krcp->bkvcache); 3021 WRITE_ONCE(krcp->nr_bkv_objs, 0); 3022 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3023 3024 llist_for_each_safe(pos, n, page_list) { 3025 free_page((unsigned long)pos); 3026 freed++; 3027 } 3028 3029 return freed; 3030 } 3031 3032 /* 3033 * This function is invoked in workqueue context after a grace period. 3034 * It frees all the objects queued on ->bkvhead_free or ->head_free. 3035 */ 3036 static void kfree_rcu_work(struct work_struct *work) 3037 { 3038 unsigned long flags; 3039 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext; 3040 struct rcu_head *head, *next; 3041 struct kfree_rcu_cpu *krcp; 3042 struct kfree_rcu_cpu_work *krwp; 3043 int i, j; 3044 3045 krwp = container_of(to_rcu_work(work), 3046 struct kfree_rcu_cpu_work, rcu_work); 3047 krcp = krwp->krcp; 3048 3049 raw_spin_lock_irqsave(&krcp->lock, flags); 3050 // Channels 1 and 2. 3051 for (i = 0; i < FREE_N_CHANNELS; i++) { 3052 bkvhead[i] = krwp->bkvhead_free[i]; 3053 krwp->bkvhead_free[i] = NULL; 3054 } 3055 3056 // Channel 3. 3057 head = krwp->head_free; 3058 krwp->head_free = NULL; 3059 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3060 3061 // Handle the first two channels. 3062 for (i = 0; i < FREE_N_CHANNELS; i++) { 3063 for (; bkvhead[i]; bkvhead[i] = bnext) { 3064 bnext = bkvhead[i]->next; 3065 debug_rcu_bhead_unqueue(bkvhead[i]); 3066 3067 rcu_lock_acquire(&rcu_callback_map); 3068 if (i == 0) { // kmalloc() / kfree(). 3069 trace_rcu_invoke_kfree_bulk_callback( 3070 rcu_state.name, bkvhead[i]->nr_records, 3071 bkvhead[i]->records); 3072 3073 kfree_bulk(bkvhead[i]->nr_records, 3074 bkvhead[i]->records); 3075 } else { // vmalloc() / vfree(). 3076 for (j = 0; j < bkvhead[i]->nr_records; j++) { 3077 trace_rcu_invoke_kvfree_callback( 3078 rcu_state.name, 3079 bkvhead[i]->records[j], 0); 3080 3081 vfree(bkvhead[i]->records[j]); 3082 } 3083 } 3084 rcu_lock_release(&rcu_callback_map); 3085 3086 raw_spin_lock_irqsave(&krcp->lock, flags); 3087 if (put_cached_bnode(krcp, bkvhead[i])) 3088 bkvhead[i] = NULL; 3089 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3090 3091 if (bkvhead[i]) 3092 free_page((unsigned long) bkvhead[i]); 3093 3094 cond_resched_tasks_rcu_qs(); 3095 } 3096 } 3097 3098 /* 3099 * This is used when the "bulk" path can not be used for the 3100 * double-argument of kvfree_rcu(). 
This happens when the 3101 * page-cache is empty, which means that objects are instead 3102 * queued on a linked list through their rcu_head structures. 3103 * This list is named "Channel 3". 3104 */ 3105 for (; head; head = next) { 3106 unsigned long offset = (unsigned long)head->func; 3107 void *ptr = (void *)head - offset; 3108 3109 next = head->next; 3110 debug_rcu_head_unqueue((struct rcu_head *)ptr); 3111 rcu_lock_acquire(&rcu_callback_map); 3112 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); 3113 3114 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) 3115 kvfree(ptr); 3116 3117 rcu_lock_release(&rcu_callback_map); 3118 cond_resched_tasks_rcu_qs(); 3119 } 3120 } 3121 3122 static bool 3123 need_offload_krc(struct kfree_rcu_cpu *krcp) 3124 { 3125 int i; 3126 3127 for (i = 0; i < FREE_N_CHANNELS; i++) 3128 if (krcp->bkvhead[i]) 3129 return true; 3130 3131 return !!krcp->head; 3132 } 3133 3134 static void 3135 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp) 3136 { 3137 long delay, delay_left; 3138 3139 delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES; 3140 if (delayed_work_pending(&krcp->monitor_work)) { 3141 delay_left = krcp->monitor_work.timer.expires - jiffies; 3142 if (delay < delay_left) 3143 mod_delayed_work(system_wq, &krcp->monitor_work, delay); 3144 return; 3145 } 3146 queue_delayed_work(system_wq, &krcp->monitor_work, delay); 3147 } 3148 3149 /* 3150 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout. 3151 */ 3152 static void kfree_rcu_monitor(struct work_struct *work) 3153 { 3154 struct kfree_rcu_cpu *krcp = container_of(work, 3155 struct kfree_rcu_cpu, monitor_work.work); 3156 unsigned long flags; 3157 int i, j; 3158 3159 raw_spin_lock_irqsave(&krcp->lock, flags); 3160 3161 // Attempt to start a new batch. 3162 for (i = 0; i < KFREE_N_BATCHES; i++) { 3163 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]); 3164 3165 // Try to detach bkvhead or head and attach it over any 3166 // available corresponding free channel. It can be that 3167 // a previous RCU batch is in progress, it means that 3168 // immediately to queue another one is not possible so 3169 // in that case the monitor work is rearmed. 3170 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) || 3171 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) || 3172 (krcp->head && !krwp->head_free)) { 3173 // Channel 1 corresponds to the SLAB-pointer bulk path. 3174 // Channel 2 corresponds to vmalloc-pointer bulk path. 3175 for (j = 0; j < FREE_N_CHANNELS; j++) { 3176 if (!krwp->bkvhead_free[j]) { 3177 krwp->bkvhead_free[j] = krcp->bkvhead[j]; 3178 krcp->bkvhead[j] = NULL; 3179 } 3180 } 3181 3182 // Channel 3 corresponds to both SLAB and vmalloc 3183 // objects queued on the linked list. 3184 if (!krwp->head_free) { 3185 krwp->head_free = krcp->head; 3186 krcp->head = NULL; 3187 } 3188 3189 WRITE_ONCE(krcp->count, 0); 3190 3191 // One work is per one batch, so there are three 3192 // "free channels", the batch can handle. It can 3193 // be that the work is in the pending state when 3194 // channels have been detached following by each 3195 // other. 3196 queue_rcu_work(system_wq, &krwp->rcu_work); 3197 } 3198 } 3199 3200 // If there is nothing to detach, it means that our job is 3201 // successfully done here. In case of having at least one 3202 // of the channels that is still busy we should rearm the 3203 // work to repeat an attempt. Because previous batches are 3204 // still in progress. 
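// need_offload_krc() returns true while any ->bkvhead[] block or the
// ->head list still holds queued objects, that is, while there is still
// work left to detach.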
3205 if (need_offload_krc(krcp)) 3206 schedule_delayed_monitor_work(krcp); 3207 3208 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3209 } 3210 3211 static enum hrtimer_restart 3212 schedule_page_work_fn(struct hrtimer *t) 3213 { 3214 struct kfree_rcu_cpu *krcp = 3215 container_of(t, struct kfree_rcu_cpu, hrtimer); 3216 3217 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0); 3218 return HRTIMER_NORESTART; 3219 } 3220 3221 static void fill_page_cache_func(struct work_struct *work) 3222 { 3223 struct kvfree_rcu_bulk_data *bnode; 3224 struct kfree_rcu_cpu *krcp = 3225 container_of(work, struct kfree_rcu_cpu, 3226 page_cache_work.work); 3227 unsigned long flags; 3228 int nr_pages; 3229 bool pushed; 3230 int i; 3231 3232 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? 3233 1 : rcu_min_cached_objs; 3234 3235 for (i = 0; i < nr_pages; i++) { 3236 bnode = (struct kvfree_rcu_bulk_data *) 3237 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); 3238 3239 if (!bnode) 3240 break; 3241 3242 raw_spin_lock_irqsave(&krcp->lock, flags); 3243 pushed = put_cached_bnode(krcp, bnode); 3244 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3245 3246 if (!pushed) { 3247 free_page((unsigned long) bnode); 3248 break; 3249 } 3250 } 3251 3252 atomic_set(&krcp->work_in_progress, 0); 3253 atomic_set(&krcp->backoff_page_cache_fill, 0); 3254 } 3255 3256 static void 3257 run_page_cache_worker(struct kfree_rcu_cpu *krcp) 3258 { 3259 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING && 3260 !atomic_xchg(&krcp->work_in_progress, 1)) { 3261 if (atomic_read(&krcp->backoff_page_cache_fill)) { 3262 queue_delayed_work(system_wq, 3263 &krcp->page_cache_work, 3264 msecs_to_jiffies(rcu_delay_page_cache_fill_msec)); 3265 } else { 3266 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3267 krcp->hrtimer.function = schedule_page_work_fn; 3268 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); 3269 } 3270 } 3271 } 3272 3273 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock() 3274 // state specified by flags. If can_alloc is true, the caller must 3275 // be schedulable and not be holding any locks or mutexes that might be 3276 // acquired by the memory allocator or anything that it might invoke. 3277 // Returns true if ptr was successfully recorded, else the caller must 3278 // use a fallback. 3279 static inline bool 3280 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp, 3281 unsigned long *flags, void *ptr, bool can_alloc) 3282 { 3283 struct kvfree_rcu_bulk_data *bnode; 3284 int idx; 3285 3286 *krcp = krc_this_cpu_lock(flags); 3287 if (unlikely(!(*krcp)->initialized)) 3288 return false; 3289 3290 idx = !!is_vmalloc_addr(ptr); 3291 3292 /* Check if a new block is required. */ 3293 if (!(*krcp)->bkvhead[idx] || 3294 (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) { 3295 bnode = get_cached_bnode(*krcp); 3296 if (!bnode && can_alloc) { 3297 krc_this_cpu_unlock(*krcp, *flags); 3298 3299 // __GFP_NORETRY - allows a light-weight direct reclaim 3300 // what is OK from minimizing of fallback hitting point of 3301 // view. Apart of that it forbids any OOM invoking what is 3302 // also beneficial since we are about to release memory soon. 3303 // 3304 // __GFP_NOMEMALLOC - prevents from consuming of all the 3305 // memory reserves. Please note we have a fallback path. 3306 // 3307 // __GFP_NOWARN - it is supposed that an allocation can 3308 // be failed under low memory or high memory pressure 3309 // scenarios. 
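//
// Note that krc_this_cpu_unlock() above re-enabled interrupts, so the
// allocation below is allowed to sleep (this branch is only taken in
// the head-less case, whose callers must be in sleepable context).
// The task may therefore migrate, in which case the krc_this_cpu_lock()
// that follows returns a different CPU's kfree_rcu_cpu structure; the
// new block is simply attached to whichever krcp is current then.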
3310 bnode = (struct kvfree_rcu_bulk_data *) 3311 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); 3312 *krcp = krc_this_cpu_lock(flags); 3313 } 3314 3315 if (!bnode) 3316 return false; 3317 3318 /* Initialize the new block. */ 3319 bnode->nr_records = 0; 3320 bnode->next = (*krcp)->bkvhead[idx]; 3321 3322 /* Attach it to the head. */ 3323 (*krcp)->bkvhead[idx] = bnode; 3324 } 3325 3326 /* Finally insert. */ 3327 (*krcp)->bkvhead[idx]->records 3328 [(*krcp)->bkvhead[idx]->nr_records++] = ptr; 3329 3330 return true; 3331 } 3332 3333 /* 3334 * Queue a request for lazy invocation of the appropriate free routine 3335 * after a grace period. Please note that three paths are maintained, 3336 * two for the common case using arrays of pointers and a third one that 3337 * is used only when the main paths cannot be used, for example, due to 3338 * memory pressure. 3339 * 3340 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained 3341 * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will 3342 * be free'd in workqueue context. This allows us to: batch requests together to 3343 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load. 3344 */ 3345 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) 3346 { 3347 unsigned long flags; 3348 struct kfree_rcu_cpu *krcp; 3349 bool success; 3350 void *ptr; 3351 3352 if (head) { 3353 ptr = (void *) head - (unsigned long) func; 3354 } else { 3355 /* 3356 * Please note there is a limitation for the head-less 3357 * variant, that is why there is a clear rule for such 3358 * objects: it can be used from might_sleep() context 3359 * only. For other places please embed an rcu_head to 3360 * your data. 3361 */ 3362 might_sleep(); 3363 ptr = (unsigned long *) func; 3364 } 3365 3366 // Queue the object but don't yet schedule the batch. 3367 if (debug_rcu_head_queue(ptr)) { 3368 // Probable double kfree_rcu(), just leak. 3369 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n", 3370 __func__, head); 3371 3372 // Mark as success and leave. 3373 return; 3374 } 3375 3376 kasan_record_aux_stack_noalloc(ptr); 3377 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); 3378 if (!success) { 3379 run_page_cache_worker(krcp); 3380 3381 if (head == NULL) 3382 // Inline if kvfree_rcu(one_arg) call. 3383 goto unlock_return; 3384 3385 head->func = func; 3386 head->next = krcp->head; 3387 krcp->head = head; 3388 success = true; 3389 } 3390 3391 WRITE_ONCE(krcp->count, krcp->count + 1); 3392 3393 // Set timer to drain after KFREE_DRAIN_JIFFIES. 3394 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING) 3395 schedule_delayed_monitor_work(krcp); 3396 3397 unlock_return: 3398 krc_this_cpu_unlock(krcp, flags); 3399 3400 /* 3401 * Inline kvfree() after synchronize_rcu(). We can do 3402 * it from might_sleep() context only, so the current 3403 * CPU can pass the QS state. 
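 *
 * Illustrative example (sketch only): a head-less, single-argument
 * kvfree_rcu() issued when no cached page is available and none can
 * be allocated ends up here, sleeping in the synchronize_rcu() below
 * and then freeing the object directly instead of deferring it to the
 * batching machinery.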
3404 */ 3405 if (!success) { 3406 debug_rcu_head_unqueue((struct rcu_head *) ptr); 3407 synchronize_rcu(); 3408 kvfree(ptr); 3409 } 3410 } 3411 EXPORT_SYMBOL_GPL(kvfree_call_rcu); 3412 3413 static unsigned long 3414 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 3415 { 3416 int cpu; 3417 unsigned long count = 0; 3418 3419 /* Snapshot count of all CPUs */ 3420 for_each_possible_cpu(cpu) { 3421 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3422 3423 count += READ_ONCE(krcp->count); 3424 count += READ_ONCE(krcp->nr_bkv_objs); 3425 atomic_set(&krcp->backoff_page_cache_fill, 1); 3426 } 3427 3428 return count == 0 ? SHRINK_EMPTY : count; 3429 } 3430 3431 static unsigned long 3432 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 3433 { 3434 int cpu, freed = 0; 3435 3436 for_each_possible_cpu(cpu) { 3437 int count; 3438 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3439 3440 count = krcp->count; 3441 count += drain_page_cache(krcp); 3442 kfree_rcu_monitor(&krcp->monitor_work.work); 3443 3444 sc->nr_to_scan -= count; 3445 freed += count; 3446 3447 if (sc->nr_to_scan <= 0) 3448 break; 3449 } 3450 3451 return freed == 0 ? SHRINK_STOP : freed; 3452 } 3453 3454 static struct shrinker kfree_rcu_shrinker = { 3455 .count_objects = kfree_rcu_shrink_count, 3456 .scan_objects = kfree_rcu_shrink_scan, 3457 .batch = 0, 3458 .seeks = DEFAULT_SEEKS, 3459 }; 3460 3461 void __init kfree_rcu_scheduler_running(void) 3462 { 3463 int cpu; 3464 unsigned long flags; 3465 3466 for_each_possible_cpu(cpu) { 3467 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3468 3469 raw_spin_lock_irqsave(&krcp->lock, flags); 3470 if (need_offload_krc(krcp)) 3471 schedule_delayed_monitor_work(krcp); 3472 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3473 } 3474 } 3475 3476 /* 3477 * During early boot, any blocking grace-period wait automatically 3478 * implies a grace period. 3479 * 3480 * Later on, this could in theory be the case for kernels built with 3481 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this 3482 * is not a common case. Furthermore, this optimization would cause 3483 * the rcu_gp_oldstate structure to expand by 50%, so this potential 3484 * grace-period optimization is ignored once the scheduler is running. 3485 */ 3486 static int rcu_blocking_is_gp(void) 3487 { 3488 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 3489 return false; 3490 might_sleep(); /* Check for RCU read-side critical section. */ 3491 return true; 3492 } 3493 3494 /** 3495 * synchronize_rcu - wait until a grace period has elapsed. 3496 * 3497 * Control will return to the caller some time after a full grace 3498 * period has elapsed, in other words after all currently executing RCU 3499 * read-side critical sections have completed. Note, however, that 3500 * upon return from synchronize_rcu(), the caller might well be executing 3501 * concurrently with new RCU read-side critical sections that began while 3502 * synchronize_rcu() was waiting. 3503 * 3504 * RCU read-side critical sections are delimited by rcu_read_lock() 3505 * and rcu_read_unlock(), and may be nested. In addition, but only in 3506 * v5.0 and later, regions of code across which interrupts, preemption, 3507 * or softirqs have been disabled also serve as RCU read-side critical 3508 * sections. This includes hardware interrupt handlers, softirq handlers, 3509 * and NMI handlers. 3510 * 3511 * Note that this guarantee implies further memory-ordering guarantees. 
3512 * On systems with more than one CPU, when synchronize_rcu() returns, 3513 * each CPU is guaranteed to have executed a full memory barrier since 3514 * the end of its last RCU read-side critical section whose beginning 3515 * preceded the call to synchronize_rcu(). In addition, each CPU having 3516 * an RCU read-side critical section that extends beyond the return from 3517 * synchronize_rcu() is guaranteed to have executed a full memory barrier 3518 * after the beginning of synchronize_rcu() and before the beginning of 3519 * that RCU read-side critical section. Note that these guarantees include 3520 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 3521 * that are executing in the kernel. 3522 * 3523 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 3524 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3525 * to have executed a full memory barrier during the execution of 3526 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 3527 * again only if the system has more than one CPU). 3528 * 3529 * Implementation of these memory-ordering guarantees is described here: 3530 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 3531 */ 3532 void synchronize_rcu(void) 3533 { 3534 unsigned long flags; 3535 struct rcu_node *rnp; 3536 3537 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3538 lock_is_held(&rcu_lock_map) || 3539 lock_is_held(&rcu_sched_lock_map), 3540 "Illegal synchronize_rcu() in RCU read-side critical section"); 3541 if (!rcu_blocking_is_gp()) { 3542 if (rcu_gp_is_expedited()) 3543 synchronize_rcu_expedited(); 3544 else 3545 wait_rcu_gp(call_rcu_hurry); 3546 return; 3547 } 3548 3549 // Context allows vacuous grace periods. 3550 // Note well that this code runs with !PREEMPT && !SMP. 3551 // In addition, all code that advances grace periods runs at 3552 // process level. Therefore, this normal GP overlaps with other 3553 // normal GPs only by being fully nested within them, which allows 3554 // reuse of ->gp_seq_polled_snap. 3555 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap); 3556 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap); 3557 3558 // Update the normal grace-period counters to record 3559 // this grace period, but only those used by the boot CPU. 3560 // The rcu_scheduler_starting() will take care of the rest of 3561 // these counters. 3562 local_irq_save(flags); 3563 WARN_ON_ONCE(num_online_cpus() > 1); 3564 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT); 3565 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) 3566 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 3567 local_irq_restore(flags); 3568 } 3569 EXPORT_SYMBOL_GPL(synchronize_rcu); 3570 3571 /** 3572 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie 3573 * @rgosp: Place to put state cookie 3574 * 3575 * Stores into @rgosp a value that will always be treated by functions 3576 * like poll_state_synchronize_rcu_full() as a cookie whose grace period 3577 * has already completed. 
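 *
 * Usage sketch (illustrative only; "gos" is a hypothetical caller-side
 * variable):
 *
 *	struct rcu_gp_oldstate gos;
 *
 *	get_completed_synchronize_rcu_full(&gos);
 *	WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&gos)); // never fires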
3578 */ 3579 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3580 { 3581 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED; 3582 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED; 3583 } 3584 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full); 3585 3586 /** 3587 * get_state_synchronize_rcu - Snapshot current RCU state 3588 * 3589 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3590 * or poll_state_synchronize_rcu() to determine whether or not a full 3591 * grace period has elapsed in the meantime. 3592 */ 3593 unsigned long get_state_synchronize_rcu(void) 3594 { 3595 /* 3596 * Any prior manipulation of RCU-protected data must happen 3597 * before the load from ->gp_seq. 3598 */ 3599 smp_mb(); /* ^^^ */ 3600 return rcu_seq_snap(&rcu_state.gp_seq_polled); 3601 } 3602 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3603 3604 /** 3605 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited 3606 * @rgosp: location to place combined normal/expedited grace-period state 3607 * 3608 * Places the normal and expedited grace-period states in @rgosp. This 3609 * state value can be passed to a later call to cond_synchronize_rcu_full() 3610 * or poll_state_synchronize_rcu_full() to determine whether or not a 3611 * grace period (whether normal or expedited) has elapsed in the meantime. 3612 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned 3613 * long, but is guaranteed to see all grace periods. In contrast, the 3614 * combined state occupies less memory, but can sometimes fail to take 3615 * grace periods into account. 3616 * 3617 * This does not guarantee that the needed grace period will actually 3618 * start. 3619 */ 3620 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3621 { 3622 struct rcu_node *rnp = rcu_get_root(); 3623 3624 /* 3625 * Any prior manipulation of RCU-protected data must happen 3626 * before the loads from ->gp_seq and ->expedited_sequence. 3627 */ 3628 smp_mb(); /* ^^^ */ 3629 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq); 3630 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); 3631 } 3632 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full); 3633 3634 /* 3635 * Helper function for start_poll_synchronize_rcu() and 3636 * start_poll_synchronize_rcu_full(). 3637 */ 3638 static void start_poll_synchronize_rcu_common(void) 3639 { 3640 unsigned long flags; 3641 bool needwake; 3642 struct rcu_data *rdp; 3643 struct rcu_node *rnp; 3644 3645 lockdep_assert_irqs_enabled(); 3646 local_irq_save(flags); 3647 rdp = this_cpu_ptr(&rcu_data); 3648 rnp = rdp->mynode; 3649 raw_spin_lock_rcu_node(rnp); // irqs already disabled. 3650 // Note it is possible for a grace period to have elapsed between 3651 // the above call to get_state_synchronize_rcu() and the below call 3652 // to rcu_seq_snap. This is OK, the worst that happens is that we 3653 // get a grace period that no one needed. These accesses are ordered 3654 // by smp_mb(), and we are accessing them in the opposite order 3655 // from which they are updated at grace-period start, as required. 
3656 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq)); 3657 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3658 if (needwake) 3659 rcu_gp_kthread_wake(); 3660 } 3661 3662 /** 3663 * start_poll_synchronize_rcu - Snapshot and start RCU grace period 3664 * 3665 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3666 * or poll_state_synchronize_rcu() to determine whether or not a full 3667 * grace period has elapsed in the meantime. If the needed grace period 3668 * is not already slated to start, notifies RCU core of the need for that 3669 * grace period. 3670 * 3671 * Interrupts must be enabled for the case where it is necessary to awaken 3672 * the grace-period kthread. 3673 */ 3674 unsigned long start_poll_synchronize_rcu(void) 3675 { 3676 unsigned long gp_seq = get_state_synchronize_rcu(); 3677 3678 start_poll_synchronize_rcu_common(); 3679 return gp_seq; 3680 } 3681 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu); 3682 3683 /** 3684 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period 3685 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full() 3686 * 3687 * Places the normal and expedited grace-period states in *@rgos. This 3688 * state value can be passed to a later call to cond_synchronize_rcu_full() 3689 * or poll_state_synchronize_rcu_full() to determine whether or not a 3690 * grace period (whether normal or expedited) has elapsed in the meantime. 3691 * If the needed grace period is not already slated to start, notifies 3692 * RCU core of the need for that grace period. 3693 * 3694 * Interrupts must be enabled for the case where it is necessary to awaken 3695 * the grace-period kthread. 3696 */ 3697 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3698 { 3699 get_state_synchronize_rcu_full(rgosp); 3700 3701 start_poll_synchronize_rcu_common(); 3702 } 3703 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full); 3704 3705 /** 3706 * poll_state_synchronize_rcu - Has the specified RCU grace period completed? 3707 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu() 3708 * 3709 * If a full RCU grace period has elapsed since the earlier call from 3710 * which @oldstate was obtained, return @true, otherwise return @false. 3711 * If @false is returned, it is the caller's responsibility to invoke this 3712 * function later on until it does return @true. Alternatively, the caller 3713 * can explicitly wait for a grace period, for example, by passing @oldstate 3714 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu(). 3715 * 3716 * Yes, this function does not take counter wrap into account. 3717 * But counter wrap is harmless. If the counter wraps, we have waited for 3718 * more than a billion grace periods (and way more on a 64-bit system!). 3719 * Those needing to keep old state values for very long time periods 3720 * (many hours even on 32-bit systems) should check them occasionally and 3721 * either refresh them or set a flag indicating that the grace period has 3722 * completed. Alternatively, they can use get_completed_synchronize_rcu() 3723 * to get a guaranteed-completed grace-period state. 3724 * 3725 * This function provides the same memory-ordering guarantees that 3726 * would be provided by a synchronize_rcu() that was invoked at the call 3727 * to the function that provided @oldstate, and that returned at the end 3728 * of this function. 
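 *
 * Minimal polling sketch (illustrative only; "p", "cookie", and
 * defer_free() are hypothetical caller-side names):
 *
 *	cookie = get_state_synchronize_rcu();
 *	// ... remove p from all structures that readers can reach ...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(p);	// a full grace period has already elapsed
 *	else
 *		defer_free(p);	// poll the same cookie again later, or
 *				// wait via cond_synchronize_rcu(cookie)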
3729 */ 3730 bool poll_state_synchronize_rcu(unsigned long oldstate) 3731 { 3732 if (oldstate == RCU_GET_STATE_COMPLETED || 3733 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) { 3734 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3735 return true; 3736 } 3737 return false; 3738 } 3739 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu); 3740 3741 /** 3742 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed? 3743 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full() 3744 * 3745 * If a full RCU grace period has elapsed since the earlier call from 3746 * which *rgosp was obtained, return @true, otherwise return @false. 3747 * If @false is returned, it is the caller's responsibility to invoke this 3748 * function later on until it does return @true. Alternatively, the caller 3749 * can explicitly wait for a grace period, for example, by passing @rgosp 3750 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu(). 3751 * 3752 * Yes, this function does not take counter wrap into account. 3753 * But counter wrap is harmless. If the counter wraps, we have waited 3754 * for more than a billion grace periods (and way more on a 64-bit 3755 * system!). Those needing to keep rcu_gp_oldstate values for very 3756 * long time periods (many hours even on 32-bit systems) should check 3757 * them occasionally and either refresh them or set a flag indicating 3758 * that the grace period has completed. Alternatively, they can use 3759 * get_completed_synchronize_rcu_full() to get a guaranteed-completed 3760 * grace-period state. 3761 * 3762 * This function provides the same memory-ordering guarantees that would 3763 * be provided by a synchronize_rcu() that was invoked at the call to 3764 * the function that provided @rgosp, and that returned at the end of this 3765 * function. And this guarantee requires that the root rcu_node structure's 3766 * ->gp_seq field be checked instead of that of the rcu_state structure. 3767 * The problem is that the just-ending grace-period's callbacks can be 3768 * invoked between the time that the root rcu_node structure's ->gp_seq 3769 * field is updated and the time that the rcu_state structure's ->gp_seq 3770 * field is updated. Therefore, if a single synchronize_rcu() is to 3771 * cause a subsequent poll_state_synchronize_rcu_full() to return @true, 3772 * then the root rcu_node structure is the one that needs to be polled. 3773 */ 3774 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3775 { 3776 struct rcu_node *rnp = rcu_get_root(); 3777 3778 smp_mb(); // Order against root rcu_node structure grace-period cleanup. 3779 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED || 3780 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) || 3781 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED || 3782 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { 3783 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3784 return true; 3785 } 3786 return false; 3787 } 3788 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full); 3789 3790 /** 3791 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3792 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited() 3793 * 3794 * If a full RCU grace period has elapsed since the earlier call to 3795 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return. 
3796 * Otherwise, invoke synchronize_rcu() to wait for a full grace period. 3797 * 3798 * Yes, this function does not take counter wrap into account. 3799 * But counter wrap is harmless. If the counter wraps, we have waited for 3800 * more than 2 billion grace periods (and way more on a 64-bit system!), 3801 * so waiting for a couple of additional grace periods should be just fine. 3802 * 3803 * This function provides the same memory-ordering guarantees that 3804 * would be provided by a synchronize_rcu() that was invoked at the call 3805 * to the function that provided @oldstate and that returned at the end 3806 * of this function. 3807 */ 3808 void cond_synchronize_rcu(unsigned long oldstate) 3809 { 3810 if (!poll_state_synchronize_rcu(oldstate)) 3811 synchronize_rcu(); 3812 } 3813 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3814 3815 /** 3816 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period 3817 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full() 3818 * 3819 * If a full RCU grace period has elapsed since the call to 3820 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), 3821 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was 3822 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait 3823 * for a full grace period. 3824 * 3825 * Yes, this function does not take counter wrap into account. 3826 * But counter wrap is harmless. If the counter wraps, we have waited for 3827 * more than 2 billion grace periods (and way more on a 64-bit system!), 3828 * so waiting for a couple of additional grace periods should be just fine. 3829 * 3830 * This function provides the same memory-ordering guarantees that 3831 * would be provided by a synchronize_rcu() that was invoked at the call 3832 * to the function that provided @rgosp and that returned at the end of 3833 * this function. 3834 */ 3835 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3836 { 3837 if (!poll_state_synchronize_rcu_full(rgosp)) 3838 synchronize_rcu(); 3839 } 3840 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full); 3841 3842 /* 3843 * Check to see if there is any immediate RCU-related work to be done by 3844 * the current CPU, returning 1 if so and zero otherwise. The checks are 3845 * in order of increasing expense: checks that can be carried out against 3846 * CPU-local state are performed first. However, we must check for CPU 3847 * stalls first, else we might not get a chance. 3848 */ 3849 static int rcu_pending(int user) 3850 { 3851 bool gp_in_progress; 3852 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 3853 struct rcu_node *rnp = rdp->mynode; 3854 3855 lockdep_assert_irqs_disabled(); 3856 3857 /* Check for CPU stalls, if enabled. */ 3858 check_cpu_stall(rdp); 3859 3860 /* Does this CPU need a deferred NOCB wakeup? */ 3861 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) 3862 return 1; 3863 3864 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 3865 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu()) 3866 return 0; 3867 3868 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3869 gp_in_progress = rcu_gp_in_progress(); 3870 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 3871 return 1; 3872 3873 /* Does this CPU have callbacks ready to invoke? 
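 * (Offloaded CPUs are ignored here because their ready callbacks are
 * invoked by the rcuo kthreads rather than by rcu_core().)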
*/ 3874 if (!rcu_rdp_is_offloaded(rdp) && 3875 rcu_segcblist_ready_cbs(&rdp->cblist)) 3876 return 1; 3877 3878 /* Has RCU gone idle with this CPU needing another grace period? */ 3879 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 3880 !rcu_rdp_is_offloaded(rdp) && 3881 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3882 return 1; 3883 3884 /* Have RCU grace period completed or started? */ 3885 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 3886 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3887 return 1; 3888 3889 /* nothing to do */ 3890 return 0; 3891 } 3892 3893 /* 3894 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3895 * the compiler is expected to optimize this away. 3896 */ 3897 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 3898 { 3899 trace_rcu_barrier(rcu_state.name, s, cpu, 3900 atomic_read(&rcu_state.barrier_cpu_count), done); 3901 } 3902 3903 /* 3904 * RCU callback function for rcu_barrier(). If we are last, wake 3905 * up the task executing rcu_barrier(). 3906 * 3907 * Note that the value of rcu_state.barrier_sequence must be captured 3908 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 3909 * other CPUs might count the value down to zero before this CPU gets 3910 * around to invoking rcu_barrier_trace(), which might result in bogus 3911 * data from the next instance of rcu_barrier(). 3912 */ 3913 static void rcu_barrier_callback(struct rcu_head *rhp) 3914 { 3915 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 3916 3917 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 3918 rcu_barrier_trace(TPS("LastCB"), -1, s); 3919 complete(&rcu_state.barrier_completion); 3920 } else { 3921 rcu_barrier_trace(TPS("CB"), -1, s); 3922 } 3923 } 3924 3925 /* 3926 * If needed, entrain an rcu_barrier() callback on rdp->cblist. 3927 */ 3928 static void rcu_barrier_entrain(struct rcu_data *rdp) 3929 { 3930 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence); 3931 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); 3932 bool wake_nocb = false; 3933 bool was_alldone = false; 3934 3935 lockdep_assert_held(&rcu_state.barrier_lock); 3936 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq)) 3937 return; 3938 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 3939 rdp->barrier_head.func = rcu_barrier_callback; 3940 debug_rcu_head_queue(&rdp->barrier_head); 3941 rcu_nocb_lock(rdp); 3942 /* 3943 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular 3944 * queue. This way we don't wait for bypass timer that can reach seconds 3945 * if it's fully lazy. 3946 */ 3947 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); 3948 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); 3949 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); 3950 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 3951 atomic_inc(&rcu_state.barrier_cpu_count); 3952 } else { 3953 debug_rcu_head_unqueue(&rdp->barrier_head); 3954 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); 3955 } 3956 rcu_nocb_unlock(rdp); 3957 if (wake_nocb) 3958 wake_nocb_gp(rdp, false); 3959 smp_store_release(&rdp->barrier_seq_snap, gseq); 3960 } 3961 3962 /* 3963 * Called with preemption disabled, and from cross-cpu IRQ context. 
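 * Reached via the smp_call_function_single() in rcu_barrier(), this
 * handler entrains the barrier callback on the target CPU's list.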
3964 */ 3965 static void rcu_barrier_handler(void *cpu_in) 3966 { 3967 uintptr_t cpu = (uintptr_t)cpu_in; 3968 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3969 3970 lockdep_assert_irqs_disabled(); 3971 WARN_ON_ONCE(cpu != rdp->cpu); 3972 WARN_ON_ONCE(cpu != smp_processor_id()); 3973 raw_spin_lock(&rcu_state.barrier_lock); 3974 rcu_barrier_entrain(rdp); 3975 raw_spin_unlock(&rcu_state.barrier_lock); 3976 } 3977 3978 /** 3979 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 3980 * 3981 * Note that this primitive does not necessarily wait for an RCU grace period 3982 * to complete. For example, if there are no RCU callbacks queued anywhere 3983 * in the system, then rcu_barrier() is within its rights to return 3984 * immediately, without waiting for anything, much less an RCU grace period. 3985 */ 3986 void rcu_barrier(void) 3987 { 3988 uintptr_t cpu; 3989 unsigned long flags; 3990 unsigned long gseq; 3991 struct rcu_data *rdp; 3992 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3993 3994 rcu_barrier_trace(TPS("Begin"), -1, s); 3995 3996 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3997 mutex_lock(&rcu_state.barrier_mutex); 3998 3999 /* Did someone else do our work for us? */ 4000 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 4001 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); 4002 smp_mb(); /* caller's subsequent code after above check. */ 4003 mutex_unlock(&rcu_state.barrier_mutex); 4004 return; 4005 } 4006 4007 /* Mark the start of the barrier operation. */ 4008 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 4009 rcu_seq_start(&rcu_state.barrier_sequence); 4010 gseq = rcu_state.barrier_sequence; 4011 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 4012 4013 /* 4014 * Initialize the count to two rather than to zero in order 4015 * to avoid a too-soon return to zero in case of an immediate 4016 * invocation of the just-enqueued callback (or preemption of 4017 * this task). Exclude CPU-hotplug operations to ensure that no 4018 * offline non-offloaded CPU has callbacks queued. 4019 */ 4020 init_completion(&rcu_state.barrier_completion); 4021 atomic_set(&rcu_state.barrier_cpu_count, 2); 4022 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4023 4024 /* 4025 * Force each CPU with callbacks to register a new callback. 4026 * When that callback is invoked, we will know that all of the 4027 * corresponding CPU's preceding callbacks have been invoked. 
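 * CPUs whose ->barrier_seq_snap already matches ->barrier_sequence have
 * already been handled for this invocation (for example, entrained via
 * the CPU-hotplug path) and are skipped by the loop below.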
4028 */ 4029 for_each_possible_cpu(cpu) { 4030 rdp = per_cpu_ptr(&rcu_data, cpu); 4031 retry: 4032 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) 4033 continue; 4034 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 4035 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { 4036 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 4037 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4038 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence); 4039 continue; 4040 } 4041 if (!rcu_rdp_cpu_online(rdp)) { 4042 rcu_barrier_entrain(rdp); 4043 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 4044 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4045 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence); 4046 continue; 4047 } 4048 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4049 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) { 4050 schedule_timeout_uninterruptible(1); 4051 goto retry; 4052 } 4053 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 4054 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence); 4055 } 4056 4057 /* 4058 * Now that we have an rcu_barrier_callback() callback on each 4059 * CPU, and thus each counted, remove the initial count. 4060 */ 4061 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) 4062 complete(&rcu_state.barrier_completion); 4063 4064 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 4065 wait_for_completion(&rcu_state.barrier_completion); 4066 4067 /* Mark the end of the barrier operation. */ 4068 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 4069 rcu_seq_end(&rcu_state.barrier_sequence); 4070 gseq = rcu_state.barrier_sequence; 4071 for_each_possible_cpu(cpu) { 4072 rdp = per_cpu_ptr(&rcu_data, cpu); 4073 4074 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 4075 } 4076 4077 /* Other rcu_barrier() invocations can now safely proceed. */ 4078 mutex_unlock(&rcu_state.barrier_mutex); 4079 } 4080 EXPORT_SYMBOL_GPL(rcu_barrier); 4081 4082 /* 4083 * Propagate ->qsinitmask bits up the rcu_node tree to account for the 4084 * first CPU in a given leaf rcu_node structure coming online. The caller 4085 * must hold the corresponding leaf rcu_node ->lock with interrupts 4086 * disabled. 4087 */ 4088 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) 4089 { 4090 long mask; 4091 long oldmask; 4092 struct rcu_node *rnp = rnp_leaf; 4093 4094 raw_lockdep_assert_held_rcu_node(rnp_leaf); 4095 WARN_ON_ONCE(rnp->wait_blkd_tasks); 4096 for (;;) { 4097 mask = rnp->grpmask; 4098 rnp = rnp->parent; 4099 if (rnp == NULL) 4100 return; 4101 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ 4102 oldmask = rnp->qsmaskinit; 4103 rnp->qsmaskinit |= mask; 4104 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ 4105 if (oldmask) 4106 return; 4107 } 4108 } 4109 4110 /* 4111 * Do boot-time initialization of a CPU's per-CPU RCU data. 4112 */ 4113 static void __init 4114 rcu_boot_init_percpu_data(int cpu) 4115 { 4116 struct context_tracking *ct = this_cpu_ptr(&context_tracking); 4117 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4118 4119 /* Set up local state, ensuring consistent view of global state. 
*/ 4120 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); 4121 INIT_WORK(&rdp->strict_work, strict_work_handler); 4122 WARN_ON_ONCE(ct->dynticks_nesting != 1); 4123 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu))); 4124 rdp->barrier_seq_snap = rcu_state.barrier_sequence; 4125 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; 4126 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; 4127 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; 4128 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; 4129 rdp->last_sched_clock = jiffies; 4130 rdp->cpu = cpu; 4131 rcu_boot_init_nocb_percpu_data(rdp); 4132 } 4133 4134 /* 4135 * Invoked early in the CPU-online process, when pretty much all services 4136 * are available. The incoming CPU is not present. 4137 * 4138 * Initializes a CPU's per-CPU RCU data. Note that only one online or 4139 * offline event can be happening at a given time. Note also that we can 4140 * accept some slop in the rsp->gp_seq access due to the fact that this 4141 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 4142 * And any offloaded callbacks are being numbered elsewhere. 4143 */ 4144 int rcutree_prepare_cpu(unsigned int cpu) 4145 { 4146 unsigned long flags; 4147 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); 4148 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4149 struct rcu_node *rnp = rcu_get_root(); 4150 4151 /* Set up local state, ensuring consistent view of global state. */ 4152 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4153 rdp->qlen_last_fqs_check = 0; 4154 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 4155 rdp->blimit = blimit; 4156 ct->dynticks_nesting = 1; /* CPU not up, no tearing. */ 4157 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 4158 4159 /* 4160 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be 4161 * (re-)initialized. 4162 */ 4163 if (!rcu_segcblist_is_enabled(&rdp->cblist)) 4164 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 4165 4166 /* 4167 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 4168 * propagation up the rcu_node tree will happen at the beginning 4169 * of the next grace period. 4170 */ 4171 rnp = rdp->mynode; 4172 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 4173 rdp->beenonline = true; /* We have now been online. */ 4174 rdp->gp_seq = READ_ONCE(rnp->gp_seq); 4175 rdp->gp_seq_needed = rdp->gp_seq; 4176 rdp->cpu_no_qs.b.norm = true; 4177 rdp->core_needs_qs = false; 4178 rdp->rcu_iw_pending = false; 4179 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); 4180 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 4181 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 4182 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4183 rcu_spawn_one_boost_kthread(rnp); 4184 rcu_spawn_cpu_nocb_kthread(cpu); 4185 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); 4186 4187 return 0; 4188 } 4189 4190 /* 4191 * Update RCU priority boot kthread affinity for CPU-hotplug changes. 4192 */ 4193 static void rcutree_affinity_setting(unsigned int cpu, int outgoing) 4194 { 4195 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4196 4197 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); 4198 } 4199 4200 /* 4201 * Near the end of the CPU-online process. Pretty much all services 4202 * enabled, and the CPU is now very much alive. 
4203 */ 4204 int rcutree_online_cpu(unsigned int cpu) 4205 { 4206 unsigned long flags; 4207 struct rcu_data *rdp; 4208 struct rcu_node *rnp; 4209 4210 rdp = per_cpu_ptr(&rcu_data, cpu); 4211 rnp = rdp->mynode; 4212 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4213 rnp->ffmask |= rdp->grpmask; 4214 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4215 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 4216 return 0; /* Too early in boot for scheduler work. */ 4217 sync_sched_exp_online_cleanup(cpu); 4218 rcutree_affinity_setting(cpu, -1); 4219 4220 // Stop-machine done, so allow nohz_full to disable tick. 4221 tick_dep_clear(TICK_DEP_BIT_RCU); 4222 return 0; 4223 } 4224 4225 /* 4226 * Near the beginning of the process. The CPU is still very much alive 4227 * with pretty much all services enabled. 4228 */ 4229 int rcutree_offline_cpu(unsigned int cpu) 4230 { 4231 unsigned long flags; 4232 struct rcu_data *rdp; 4233 struct rcu_node *rnp; 4234 4235 rdp = per_cpu_ptr(&rcu_data, cpu); 4236 rnp = rdp->mynode; 4237 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4238 rnp->ffmask &= ~rdp->grpmask; 4239 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4240 4241 rcutree_affinity_setting(cpu, cpu); 4242 4243 // nohz_full CPUs need the tick for stop-machine to work quickly 4244 tick_dep_set(TICK_DEP_BIT_RCU); 4245 return 0; 4246 } 4247 4248 /* 4249 * Mark the specified CPU as being online so that subsequent grace periods 4250 * (both expedited and normal) will wait on it. Note that this means that 4251 * incoming CPUs are not allowed to use RCU read-side critical sections 4252 * until this function is called. Failing to observe this restriction 4253 * will result in lockdep splats. 4254 * 4255 * Note that this function is special in that it is invoked directly 4256 * from the incoming CPU rather than from the cpuhp_step mechanism. 4257 * This is because this function must be invoked at a precise location. 4258 */ 4259 void rcu_cpu_starting(unsigned int cpu) 4260 { 4261 unsigned long flags; 4262 unsigned long mask; 4263 struct rcu_data *rdp; 4264 struct rcu_node *rnp; 4265 bool newcpu; 4266 4267 rdp = per_cpu_ptr(&rcu_data, cpu); 4268 if (rdp->cpu_started) 4269 return; 4270 rdp->cpu_started = true; 4271 4272 rnp = rdp->mynode; 4273 mask = rdp->grpmask; 4274 local_irq_save(flags); 4275 arch_spin_lock(&rcu_state.ofl_lock); 4276 rcu_dynticks_eqs_online(); 4277 raw_spin_lock(&rcu_state.barrier_lock); 4278 raw_spin_lock_rcu_node(rnp); 4279 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); 4280 raw_spin_unlock(&rcu_state.barrier_lock); 4281 newcpu = !(rnp->expmaskinitnext & mask); 4282 rnp->expmaskinitnext |= mask; 4283 /* Allow lockless access for expedited grace periods. */ 4284 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */ 4285 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus); 4286 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ 4287 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4288 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); 4289 4290 /* An incoming CPU should never be blocking a grace period. */ 4291 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ 4292 /* rcu_report_qs_rnp() *really* wants some flags to restore */ 4293 unsigned long flags2; 4294 4295 local_irq_save(flags2); 4296 rcu_disable_urgency_upon_qs(rdp); 4297 /* Report QS -after- changing ->qsmaskinitnext! 
*/ 4298 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2); 4299 } else { 4300 raw_spin_unlock_rcu_node(rnp); 4301 } 4302 arch_spin_unlock(&rcu_state.ofl_lock); 4303 local_irq_restore(flags); 4304 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 4305 } 4306 4307 /* 4308 * The outgoing function has no further need of RCU, so remove it from 4309 * the rcu_node tree's ->qsmaskinitnext bit masks. 4310 * 4311 * Note that this function is special in that it is invoked directly 4312 * from the outgoing CPU rather than from the cpuhp_step mechanism. 4313 * This is because this function must be invoked at a precise location. 4314 */ 4315 void rcu_report_dead(unsigned int cpu) 4316 { 4317 unsigned long flags, seq_flags; 4318 unsigned long mask; 4319 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4320 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 4321 4322 // Do any dangling deferred wakeups. 4323 do_nocb_deferred_wakeup(rdp); 4324 4325 rcu_preempt_deferred_qs(current); 4326 4327 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 4328 mask = rdp->grpmask; 4329 local_irq_save(seq_flags); 4330 arch_spin_lock(&rcu_state.ofl_lock); 4331 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 4332 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4333 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); 4334 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 4335 /* Report quiescent state -before- changing ->qsmaskinitnext! */ 4336 rcu_disable_urgency_upon_qs(rdp); 4337 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4338 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4339 } 4340 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); 4341 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4342 arch_spin_unlock(&rcu_state.ofl_lock); 4343 local_irq_restore(seq_flags); 4344 4345 rdp->cpu_started = false; 4346 } 4347 4348 #ifdef CONFIG_HOTPLUG_CPU 4349 /* 4350 * The outgoing CPU has just passed through the dying-idle state, and we 4351 * are being invoked from the CPU that was IPIed to continue the offline 4352 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 4353 */ 4354 void rcutree_migrate_callbacks(int cpu) 4355 { 4356 unsigned long flags; 4357 struct rcu_data *my_rdp; 4358 struct rcu_node *my_rnp; 4359 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4360 bool needwake; 4361 4362 if (rcu_rdp_is_offloaded(rdp) || 4363 rcu_segcblist_empty(&rdp->cblist)) 4364 return; /* No callbacks to migrate. */ 4365 4366 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 4367 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); 4368 rcu_barrier_entrain(rdp); 4369 my_rdp = this_cpu_ptr(&rcu_data); 4370 my_rnp = my_rdp->mynode; 4371 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 4372 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false)); 4373 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 4374 /* Leverage recent GPs and set GP for new callbacks. */ 4375 needwake = rcu_advance_cbs(my_rnp, rdp) || 4376 rcu_advance_cbs(my_rnp, my_rdp); 4377 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 4378 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. 
*/ 4379 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 4380 rcu_segcblist_disable(&rdp->cblist); 4381 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); 4382 check_cb_ovld_locked(my_rdp, my_rnp); 4383 if (rcu_rdp_is_offloaded(my_rdp)) { 4384 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 4385 __call_rcu_nocb_wake(my_rdp, true, flags); 4386 } else { 4387 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ 4388 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags); 4389 } 4390 if (needwake) 4391 rcu_gp_kthread_wake(); 4392 lockdep_assert_irqs_enabled(); 4393 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 4394 !rcu_segcblist_empty(&rdp->cblist), 4395 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 4396 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 4397 rcu_segcblist_first_cb(&rdp->cblist)); 4398 } 4399 #endif 4400 4401 /* 4402 * On non-huge systems, use expedited RCU grace periods to make suspend 4403 * and hibernation run faster. 4404 */ 4405 static int rcu_pm_notify(struct notifier_block *self, 4406 unsigned long action, void *hcpu) 4407 { 4408 switch (action) { 4409 case PM_HIBERNATION_PREPARE: 4410 case PM_SUSPEND_PREPARE: 4411 rcu_expedite_gp(); 4412 break; 4413 case PM_POST_HIBERNATION: 4414 case PM_POST_SUSPEND: 4415 rcu_unexpedite_gp(); 4416 break; 4417 default: 4418 break; 4419 } 4420 return NOTIFY_OK; 4421 } 4422 4423 #ifdef CONFIG_RCU_EXP_KTHREAD 4424 struct kthread_worker *rcu_exp_gp_kworker; 4425 struct kthread_worker *rcu_exp_par_gp_kworker; 4426 4427 static void __init rcu_start_exp_gp_kworkers(void) 4428 { 4429 const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker"; 4430 const char *gp_kworker_name = "rcu_exp_gp_kthread_worker"; 4431 struct sched_param param = { .sched_priority = kthread_prio }; 4432 4433 rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name); 4434 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { 4435 pr_err("Failed to create %s!\n", gp_kworker_name); 4436 return; 4437 } 4438 4439 rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name); 4440 if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) { 4441 pr_err("Failed to create %s!\n", par_gp_kworker_name); 4442 kthread_destroy_worker(rcu_exp_gp_kworker); 4443 return; 4444 } 4445 4446 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); 4447 sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO, 4448 &param); 4449 } 4450 4451 static inline void rcu_alloc_par_gp_wq(void) 4452 { 4453 } 4454 #else /* !CONFIG_RCU_EXP_KTHREAD */ 4455 struct workqueue_struct *rcu_par_gp_wq; 4456 4457 static void __init rcu_start_exp_gp_kworkers(void) 4458 { 4459 } 4460 4461 static inline void rcu_alloc_par_gp_wq(void) 4462 { 4463 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); 4464 WARN_ON(!rcu_par_gp_wq); 4465 } 4466 #endif /* CONFIG_RCU_EXP_KTHREAD */ 4467 4468 /* 4469 * Spawn the kthreads that handle RCU's grace periods. 
4470 */ 4471 static int __init rcu_spawn_gp_kthread(void) 4472 { 4473 unsigned long flags; 4474 struct rcu_node *rnp; 4475 struct sched_param sp; 4476 struct task_struct *t; 4477 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 4478 4479 rcu_scheduler_fully_active = 1; 4480 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 4481 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 4482 return 0; 4483 if (kthread_prio) { 4484 sp.sched_priority = kthread_prio; 4485 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 4486 } 4487 rnp = rcu_get_root(); 4488 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4489 WRITE_ONCE(rcu_state.gp_activity, jiffies); 4490 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 4491 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread. 4492 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ 4493 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4494 wake_up_process(t); 4495 /* This is a pre-SMP initcall, we expect a single CPU */ 4496 WARN_ON(num_online_cpus() > 1); 4497 /* 4498 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu() 4499 * due to rcu_scheduler_fully_active. 4500 */ 4501 rcu_spawn_cpu_nocb_kthread(smp_processor_id()); 4502 rcu_spawn_one_boost_kthread(rdp->mynode); 4503 rcu_spawn_core_kthreads(); 4504 /* Create kthread worker for expedited GPs */ 4505 rcu_start_exp_gp_kworkers(); 4506 return 0; 4507 } 4508 early_initcall(rcu_spawn_gp_kthread); 4509 4510 /* 4511 * This function is invoked towards the end of the scheduler's 4512 * initialization process. Before this is called, the idle task might 4513 * contain synchronous grace-period primitives (during which time, this idle 4514 * task is booting the system, and such primitives are no-ops). After this 4515 * function is called, any synchronous grace-period primitives are run as 4516 * expedited, with the requesting task driving the grace period forward. 4517 * A later core_initcall() rcu_set_runtime_mode() will switch to full 4518 * runtime RCU functionality. 4519 */ 4520 void rcu_scheduler_starting(void) 4521 { 4522 unsigned long flags; 4523 struct rcu_node *rnp; 4524 4525 WARN_ON(num_online_cpus() != 1); 4526 WARN_ON(nr_context_switches() > 0); 4527 rcu_test_sync_prims(); 4528 4529 // Fix up the ->gp_seq counters. 4530 local_irq_save(flags); 4531 rcu_for_each_node_breadth_first(rnp) 4532 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 4533 local_irq_restore(flags); 4534 4535 // Switch out of early boot mode. 4536 rcu_scheduler_active = RCU_SCHEDULER_INIT; 4537 rcu_test_sync_prims(); 4538 } 4539 4540 /* 4541 * Helper function for rcu_init() that initializes the rcu_state structure. 4542 */ 4543 static void __init rcu_init_one(void) 4544 { 4545 static const char * const buf[] = RCU_NODE_NAME_INIT; 4546 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4547 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 4548 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 4549 4550 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 4551 int cpustride = 1; 4552 int i; 4553 int j; 4554 struct rcu_node *rnp; 4555 4556 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 4557 4558 /* Silence gcc 4.8 false positive about array index out of range. */ 4559 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 4560 panic("rcu_init_one: rcu_num_lvls out of range"); 4561 4562 /* Initialize the level-tracking arrays. 
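 *
 * For example (illustrative numbers only): a two-level tree covering
 * 64 CPUs with rcu_fanout_leaf = 16 has num_rcu_lvl[] = { 1, 4 }, so
 * ->level[0] points at the single root rcu_node structure and
 * ->level[1] at the first of the four leaf rcu_node structures that
 * follow it in the ->node[] array.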
*/ 4563 4564 for (i = 1; i < rcu_num_lvls; i++) 4565 rcu_state.level[i] = 4566 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; 4567 rcu_init_levelspread(levelspread, num_rcu_lvl); 4568 4569 /* Initialize the elements themselves, starting from the leaves. */ 4570 4571 for (i = rcu_num_lvls - 1; i >= 0; i--) { 4572 cpustride *= levelspread[i]; 4573 rnp = rcu_state.level[i]; 4574 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { 4575 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); 4576 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), 4577 &rcu_node_class[i], buf[i]); 4578 raw_spin_lock_init(&rnp->fqslock); 4579 lockdep_set_class_and_name(&rnp->fqslock, 4580 &rcu_fqs_class[i], fqs[i]); 4581 rnp->gp_seq = rcu_state.gp_seq; 4582 rnp->gp_seq_needed = rcu_state.gp_seq; 4583 rnp->completedqs = rcu_state.gp_seq; 4584 rnp->qsmask = 0; 4585 rnp->qsmaskinit = 0; 4586 rnp->grplo = j * cpustride; 4587 rnp->grphi = (j + 1) * cpustride - 1; 4588 if (rnp->grphi >= nr_cpu_ids) 4589 rnp->grphi = nr_cpu_ids - 1; 4590 if (i == 0) { 4591 rnp->grpnum = 0; 4592 rnp->grpmask = 0; 4593 rnp->parent = NULL; 4594 } else { 4595 rnp->grpnum = j % levelspread[i - 1]; 4596 rnp->grpmask = BIT(rnp->grpnum); 4597 rnp->parent = rcu_state.level[i - 1] + 4598 j / levelspread[i - 1]; 4599 } 4600 rnp->level = i; 4601 INIT_LIST_HEAD(&rnp->blkd_tasks); 4602 rcu_init_one_nocb(rnp); 4603 init_waitqueue_head(&rnp->exp_wq[0]); 4604 init_waitqueue_head(&rnp->exp_wq[1]); 4605 init_waitqueue_head(&rnp->exp_wq[2]); 4606 init_waitqueue_head(&rnp->exp_wq[3]); 4607 spin_lock_init(&rnp->exp_lock); 4608 mutex_init(&rnp->boost_kthread_mutex); 4609 raw_spin_lock_init(&rnp->exp_poll_lock); 4610 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; 4611 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp); 4612 } 4613 } 4614 4615 init_swait_queue_head(&rcu_state.gp_wq); 4616 init_swait_queue_head(&rcu_state.expedited_wq); 4617 rnp = rcu_first_leaf_node(); 4618 for_each_possible_cpu(i) { 4619 while (i > rnp->grphi) 4620 rnp++; 4621 per_cpu_ptr(&rcu_data, i)->mynode = rnp; 4622 rcu_boot_init_percpu_data(i); 4623 } 4624 } 4625 4626 /* 4627 * Force priority from the kernel command-line into range. 4628 */ 4629 static void __init sanitize_kthread_prio(void) 4630 { 4631 int kthread_prio_in = kthread_prio; 4632 4633 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2 4634 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) 4635 kthread_prio = 2; 4636 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 4637 kthread_prio = 1; 4638 else if (kthread_prio < 0) 4639 kthread_prio = 0; 4640 else if (kthread_prio > 99) 4641 kthread_prio = 99; 4642 4643 if (kthread_prio != kthread_prio_in) 4644 pr_alert("%s: Limited prio to %d from %d\n", 4645 __func__, kthread_prio, kthread_prio_in); 4646 } 4647 4648 /* 4649 * Compute the rcu_node tree geometry from kernel parameters. This cannot 4650 * replace the definitions in tree.h because those are needed to size 4651 * the ->node array in the rcu_state structure. 4652 */ 4653 void rcu_init_geometry(void) 4654 { 4655 ulong d; 4656 int i; 4657 static unsigned long old_nr_cpu_ids; 4658 int rcu_capacity[RCU_NUM_LVLS]; 4659 static bool initialized; 4660 4661 if (initialized) { 4662 /* 4663 * Warn if setup_nr_cpu_ids() had not yet been invoked, 4664 * unless nr_cpus_ids == NR_CPUS, in which case who cares? 4665 */ 4666 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids); 4667 return; 4668 } 4669 4670 old_nr_cpu_ids = nr_cpu_ids; 4671 initialized = true; 4672 4673 /* 4674 * Initialize any unspecified boot parameters. 
4675 * The default values of jiffies_till_first_fqs and 4676 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS 4677 * value, which is a function of HZ, then adding one for each 4678 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system. 4679 */ 4680 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 4681 if (jiffies_till_first_fqs == ULONG_MAX) 4682 jiffies_till_first_fqs = d; 4683 if (jiffies_till_next_fqs == ULONG_MAX) 4684 jiffies_till_next_fqs = d; 4685 adjust_jiffies_till_sched_qs(); 4686 4687 /* If the compile-time values are accurate, just leave. */ 4688 if (rcu_fanout_leaf == RCU_FANOUT_LEAF && 4689 nr_cpu_ids == NR_CPUS) 4690 return; 4691 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n", 4692 rcu_fanout_leaf, nr_cpu_ids); 4693 4694 /* 4695 * The boot-time rcu_fanout_leaf parameter must be at least two 4696 * and cannot exceed the number of bits in the rcu_node masks. 4697 * Complain and fall back to the compile-time values if this 4698 * limit is exceeded. 4699 */ 4700 if (rcu_fanout_leaf < 2 || 4701 rcu_fanout_leaf > sizeof(unsigned long) * 8) { 4702 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4703 WARN_ON(1); 4704 return; 4705 } 4706 4707 /* 4708 * Compute number of nodes that can be handled an rcu_node tree 4709 * with the given number of levels. 4710 */ 4711 rcu_capacity[0] = rcu_fanout_leaf; 4712 for (i = 1; i < RCU_NUM_LVLS; i++) 4713 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; 4714 4715 /* 4716 * The tree must be able to accommodate the configured number of CPUs. 4717 * If this limit is exceeded, fall back to the compile-time values. 4718 */ 4719 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { 4720 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4721 WARN_ON(1); 4722 return; 4723 } 4724 4725 /* Calculate the number of levels in the tree. */ 4726 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { 4727 } 4728 rcu_num_lvls = i + 1; 4729 4730 /* Calculate the number of rcu_nodes at each level of the tree. */ 4731 for (i = 0; i < rcu_num_lvls; i++) { 4732 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; 4733 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); 4734 } 4735 4736 /* Calculate the total number of rcu_node structures. */ 4737 rcu_num_nodes = 0; 4738 for (i = 0; i < rcu_num_lvls; i++) 4739 rcu_num_nodes += num_rcu_lvl[i]; 4740 } 4741 4742 /* 4743 * Dump out the structure of the rcu_node combining tree associated 4744 * with the rcu_state structure. 4745 */ 4746 static void __init rcu_dump_rcu_node_tree(void) 4747 { 4748 int level = 0; 4749 struct rcu_node *rnp; 4750 4751 pr_info("rcu_node tree layout dump\n"); 4752 pr_info(" "); 4753 rcu_for_each_node_breadth_first(rnp) { 4754 if (rnp->level != level) { 4755 pr_cont("\n"); 4756 pr_info(" "); 4757 level = rnp->level; 4758 } 4759 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 4760 } 4761 pr_cont("\n"); 4762 } 4763 4764 struct workqueue_struct *rcu_gp_wq; 4765 4766 static void __init kfree_rcu_batch_init(void) 4767 { 4768 int cpu; 4769 int i; 4770 4771 /* Clamp it to [0:100] seconds interval. 
*/ 4772 if (rcu_delay_page_cache_fill_msec < 0 || 4773 rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) { 4774 4775 rcu_delay_page_cache_fill_msec = 4776 clamp(rcu_delay_page_cache_fill_msec, 0, 4777 (int) (100 * MSEC_PER_SEC)); 4778 4779 pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n", 4780 rcu_delay_page_cache_fill_msec); 4781 } 4782 4783 for_each_possible_cpu(cpu) { 4784 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 4785 4786 for (i = 0; i < KFREE_N_BATCHES; i++) { 4787 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); 4788 krcp->krw_arr[i].krcp = krcp; 4789 } 4790 4791 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); 4792 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); 4793 krcp->initialized = true; 4794 } 4795 if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree")) 4796 pr_err("Failed to register kfree_rcu() shrinker!\n"); 4797 } 4798 4799 void __init rcu_init(void) 4800 { 4801 int cpu = smp_processor_id(); 4802 4803 rcu_early_boot_tests(); 4804 4805 kfree_rcu_batch_init(); 4806 rcu_bootup_announce(); 4807 sanitize_kthread_prio(); 4808 rcu_init_geometry(); 4809 rcu_init_one(); 4810 if (dump_tree) 4811 rcu_dump_rcu_node_tree(); 4812 if (use_softirq) 4813 open_softirq(RCU_SOFTIRQ, rcu_core_si); 4814 4815 /* 4816 * We don't need protection against CPU-hotplug here because 4817 * this is called early in boot, before either interrupts 4818 * or the scheduler are operational. 4819 */ 4820 pm_notifier(rcu_pm_notify, 0); 4821 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot. 4822 rcutree_prepare_cpu(cpu); 4823 rcu_cpu_starting(cpu); 4824 rcutree_online_cpu(cpu); 4825 4826 /* Create workqueue for Tree SRCU and for expedited GPs. */ 4827 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); 4828 WARN_ON(!rcu_gp_wq); 4829 rcu_alloc_par_gp_wq(); 4830 4831 /* Fill in default value for rcutree.qovld boot parameter. */ 4832 /* -After- the rcu_node ->lock fields are initialized! */ 4833 if (qovld < 0) 4834 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark; 4835 else 4836 qovld_calc = qovld; 4837 4838 // Kick-start any polled grace periods that started early. 4839 if (!(per_cpu_ptr(&rcu_data, cpu)->mynode->exp_seq_poll_rq & 0x1)) 4840 (void)start_poll_synchronize_rcu_expedited(); 4841 } 4842 4843 #include "tree_stall.h" 4844 #include "tree_exp.h" 4845 #include "tree_nocb.h" 4846 #include "tree_plugin.h" 4847
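
/*
 * Editor's illustrative sketch: not part of the upstream file. It shows
 * the common caller-side pattern for the double-argument kvfree_rcu()
 * served by the batching machinery above. The "example_" identifiers
 * are hypothetical and exist only for this sketch.
 */
struct example_item {
	int key;
	struct rcu_head rh;	/* storage consumed by kvfree_rcu(ptr, field) */
};

static void __maybe_unused example_retire_item(struct example_item *p)
{
	/*
	 * Neither sleeps nor allocates on this path: the object is queued
	 * on the per-CPU kfree_rcu_cpu lists and freed from workqueue
	 * context once a grace period has elapsed (see kfree_rcu_work()).
	 */
	kvfree_rcu(p, rh);
}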