1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 /* 3 * Task-based RCU implementations. 4 * 5 * Copyright (C) 2020 Paul E. McKenney 6 */ 7 8 #ifdef CONFIG_TASKS_RCU_GENERIC 9 #include "rcu_segcblist.h" 10 11 //////////////////////////////////////////////////////////////////////// 12 // 13 // Generic data structures. 14 15 struct rcu_tasks; 16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp); 17 typedef void (*pregp_func_t)(struct list_head *hop); 18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop); 19 typedef void (*postscan_func_t)(struct list_head *hop); 20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); 21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp); 22 23 /** 24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. 25 * @cblist: Callback list. 26 * @lock: Lock protecting per-CPU callback list. 27 * @rtp_jiffies: Jiffies counter value for statistics. 28 * @lazy_timer: Timer to unlazify callbacks. 29 * @urgent_gp: Number of additional non-lazy grace periods. 30 * @rtp_n_lock_retries: Rough lock-contention statistic. 31 * @rtp_work: Work queue for invoking callbacks. 32 * @rtp_irq_work: IRQ work queue for deferred wakeups. 33 * @barrier_q_head: RCU callback for barrier operation. 34 * @rtp_blkd_tasks: List of tasks blocked as readers. 35 * @cpu: CPU number corresponding to this entry. 36 * @rtpp: Pointer to the rcu_tasks structure. 37 */ 38 struct rcu_tasks_percpu { 39 struct rcu_segcblist cblist; 40 raw_spinlock_t __private lock; 41 unsigned long rtp_jiffies; 42 unsigned long rtp_n_lock_retries; 43 struct timer_list lazy_timer; 44 unsigned int urgent_gp; 45 struct work_struct rtp_work; 46 struct irq_work rtp_irq_work; 47 struct rcu_head barrier_q_head; 48 struct list_head rtp_blkd_tasks; 49 int cpu; 50 struct rcu_tasks *rtpp; 51 }; 52 53 /** 54 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. 55 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention. 56 * @cbs_gbl_lock: Lock protecting callback list. 57 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone. 58 * @gp_func: This flavor's grace-period-wait function. 59 * @gp_state: Grace period's most recent state transition (debugging). 60 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. 61 * @init_fract: Initial backoff sleep interval. 62 * @gp_jiffies: Time of last @gp_state transition. 63 * @gp_start: Most recent grace-period start in jiffies. 64 * @tasks_gp_seq: Number of grace periods completed since boot. 65 * @n_ipis: Number of IPIs sent to encourage grace periods to end. 66 * @n_ipis_fails: Number of IPI-send failures. 67 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. 68 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy. 69 * @pregp_func: This flavor's pre-grace-period function (optional). 70 * @pertask_func: This flavor's per-task scan function (optional). 71 * @postscan_func: This flavor's post-task scan function (optional). 72 * @holdouts_func: This flavor's holdout-list scan function (optional). 73 * @postgp_func: This flavor's post-grace-period function (optional). 74 * @call_func: This flavor's call_rcu()-equivalent function. 75 * @rtpcpu: This flavor's rcu_tasks_percpu structure. 76 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. 77 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. 
78 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing. 79 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers. 80 * @barrier_q_mutex: Serialize barrier operations. 81 * @barrier_q_count: Number of queues being waited on. 82 * @barrier_q_completion: Barrier wait/wakeup mechanism. 83 * @barrier_q_seq: Sequence number for barrier operations. 84 * @name: This flavor's textual name. 85 * @kname: This flavor's kthread name. 86 */ 87 struct rcu_tasks { 88 struct rcuwait cbs_wait; 89 raw_spinlock_t cbs_gbl_lock; 90 struct mutex tasks_gp_mutex; 91 int gp_state; 92 int gp_sleep; 93 int init_fract; 94 unsigned long gp_jiffies; 95 unsigned long gp_start; 96 unsigned long tasks_gp_seq; 97 unsigned long n_ipis; 98 unsigned long n_ipis_fails; 99 struct task_struct *kthread_ptr; 100 unsigned long lazy_jiffies; 101 rcu_tasks_gp_func_t gp_func; 102 pregp_func_t pregp_func; 103 pertask_func_t pertask_func; 104 postscan_func_t postscan_func; 105 holdouts_func_t holdouts_func; 106 postgp_func_t postgp_func; 107 call_rcu_func_t call_func; 108 struct rcu_tasks_percpu __percpu *rtpcpu; 109 int percpu_enqueue_shift; 110 int percpu_enqueue_lim; 111 int percpu_dequeue_lim; 112 unsigned long percpu_dequeue_gpseq; 113 struct mutex barrier_q_mutex; 114 atomic_t barrier_q_count; 115 struct completion barrier_q_completion; 116 unsigned long barrier_q_seq; 117 char *name; 118 char *kname; 119 }; 120 121 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp); 122 123 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ 124 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ 125 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ 126 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \ 127 }; \ 128 static struct rcu_tasks rt_name = \ 129 { \ 130 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \ 131 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ 132 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \ 133 .gp_func = gp, \ 134 .call_func = call, \ 135 .rtpcpu = &rt_name ## __percpu, \ 136 .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \ 137 .name = n, \ 138 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \ 139 .percpu_enqueue_lim = 1, \ 140 .percpu_dequeue_lim = 1, \ 141 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \ 142 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \ 143 .kname = #rt_name, \ 144 } 145 146 #ifdef CONFIG_TASKS_RCU 147 /* Track exiting tasks in order to allow them to be waited for. */ 148 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); 149 150 /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */ 151 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); 152 static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall); 153 #endif 154 155 /* Avoid IPIing CPUs early in the grace period. */ 156 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) 157 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; 158 module_param(rcu_task_ipi_delay, int, 0644); 159 160 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. 
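 * With the default values defined just below, the first informational
 * message appears roughly rcu_task_stall_info (10 seconds) after the
 * grace period starts waiting on holdout tasks, later messages back off
 * at intervals that triple each time (rcu_task_stall_info_mult: 10s,
 * then 30s, then 90s, ...), and a full stall warning is issued once
 * rcu_task_stall_timeout (10 minutes) has elapsed.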
*/ 161 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30) 162 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) 163 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; 164 module_param(rcu_task_stall_timeout, int, 0644); 165 #define RCU_TASK_STALL_INFO (HZ * 10) 166 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO; 167 module_param(rcu_task_stall_info, int, 0644); 168 static int rcu_task_stall_info_mult __read_mostly = 3; 169 module_param(rcu_task_stall_info_mult, int, 0444); 170 171 static int rcu_task_enqueue_lim __read_mostly = -1; 172 module_param(rcu_task_enqueue_lim, int, 0444); 173 174 static bool rcu_task_cb_adjust; 175 static int rcu_task_contend_lim __read_mostly = 100; 176 module_param(rcu_task_contend_lim, int, 0444); 177 static int rcu_task_collapse_lim __read_mostly = 10; 178 module_param(rcu_task_collapse_lim, int, 0444); 179 static int rcu_task_lazy_lim __read_mostly = 32; 180 module_param(rcu_task_lazy_lim, int, 0444); 181 182 /* RCU tasks grace-period state for debugging. */ 183 #define RTGS_INIT 0 184 #define RTGS_WAIT_WAIT_CBS 1 185 #define RTGS_WAIT_GP 2 186 #define RTGS_PRE_WAIT_GP 3 187 #define RTGS_SCAN_TASKLIST 4 188 #define RTGS_POST_SCAN_TASKLIST 5 189 #define RTGS_WAIT_SCAN_HOLDOUTS 6 190 #define RTGS_SCAN_HOLDOUTS 7 191 #define RTGS_POST_GP 8 192 #define RTGS_WAIT_READERS 9 193 #define RTGS_INVOKE_CBS 10 194 #define RTGS_WAIT_CBS 11 195 #ifndef CONFIG_TINY_RCU 196 static const char * const rcu_tasks_gp_state_names[] = { 197 "RTGS_INIT", 198 "RTGS_WAIT_WAIT_CBS", 199 "RTGS_WAIT_GP", 200 "RTGS_PRE_WAIT_GP", 201 "RTGS_SCAN_TASKLIST", 202 "RTGS_POST_SCAN_TASKLIST", 203 "RTGS_WAIT_SCAN_HOLDOUTS", 204 "RTGS_SCAN_HOLDOUTS", 205 "RTGS_POST_GP", 206 "RTGS_WAIT_READERS", 207 "RTGS_INVOKE_CBS", 208 "RTGS_WAIT_CBS", 209 }; 210 #endif /* #ifndef CONFIG_TINY_RCU */ 211 212 //////////////////////////////////////////////////////////////////////// 213 // 214 // Generic code. 215 216 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp); 217 218 /* Record grace-period phase and time. */ 219 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) 220 { 221 rtp->gp_state = newstate; 222 rtp->gp_jiffies = jiffies; 223 } 224 225 #ifndef CONFIG_TINY_RCU 226 /* Return state name. */ 227 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) 228 { 229 int i = data_race(rtp->gp_state); // Let KCSAN detect update races 230 int j = READ_ONCE(i); // Prevent the compiler from reading twice 231 232 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names)) 233 return "???"; 234 return rcu_tasks_gp_state_names[j]; 235 } 236 #endif /* #ifndef CONFIG_TINY_RCU */ 237 238 // Initialize per-CPU callback lists for the specified flavor of 239 // Tasks RCU. Do not enqueue callbacks before this function is invoked. 
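//
// For illustration (example values, not from this file): with nr_cpu_ids = 8
// and the default rcu_task_enqueue_lim of -1, cblist_init_generic() below
// picks lim = 1 and percpu_enqueue_shift = 3, so the enqueue-time computation
// smp_processor_id() >> 3 maps every CPU to callback queue 0.  If lock
// contention later forces a switch to per-CPU queuing, the shift drops to 0
// and each CPU enqueues onto its own queue.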
240 static void cblist_init_generic(struct rcu_tasks *rtp) 241 { 242 int cpu; 243 unsigned long flags; 244 int lim; 245 int shift; 246 247 if (rcu_task_enqueue_lim < 0) { 248 rcu_task_enqueue_lim = 1; 249 rcu_task_cb_adjust = true; 250 } else if (rcu_task_enqueue_lim == 0) { 251 rcu_task_enqueue_lim = 1; 252 } 253 lim = rcu_task_enqueue_lim; 254 255 if (lim > nr_cpu_ids) 256 lim = nr_cpu_ids; 257 shift = ilog2(nr_cpu_ids / lim); 258 if (((nr_cpu_ids - 1) >> shift) >= lim) 259 shift++; 260 WRITE_ONCE(rtp->percpu_enqueue_shift, shift); 261 WRITE_ONCE(rtp->percpu_dequeue_lim, lim); 262 smp_store_release(&rtp->percpu_enqueue_lim, lim); 263 for_each_possible_cpu(cpu) { 264 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 265 266 WARN_ON_ONCE(!rtpcp); 267 if (cpu) 268 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); 269 local_irq_save(flags); // serialize initialization 270 if (rcu_segcblist_empty(&rtpcp->cblist)) 271 rcu_segcblist_init(&rtpcp->cblist); 272 local_irq_restore(flags); 273 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); 274 rtpcp->cpu = cpu; 275 rtpcp->rtpp = rtp; 276 if (!rtpcp->rtp_blkd_tasks.next) 277 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); 278 } 279 280 pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name, 281 data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust); 282 } 283 284 // Compute wakeup time for lazy callback timer. 285 static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp) 286 { 287 return jiffies + rtp->lazy_jiffies; 288 } 289 290 // Timer handler that unlazifies lazy callbacks. 291 static void call_rcu_tasks_generic_timer(struct timer_list *tlp) 292 { 293 unsigned long flags; 294 bool needwake = false; 295 struct rcu_tasks *rtp; 296 struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer); 297 298 rtp = rtpcp->rtpp; 299 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 300 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) { 301 if (!rtpcp->urgent_gp) 302 rtpcp->urgent_gp = 1; 303 needwake = true; 304 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); 305 } 306 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 307 if (needwake) 308 rcuwait_wake_up(&rtp->cbs_wait); 309 } 310 311 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic(). 312 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp) 313 { 314 struct rcu_tasks *rtp; 315 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work); 316 317 rtp = rtpcp->rtpp; 318 rcuwait_wake_up(&rtp->cbs_wait); 319 } 320 321 // Enqueue a callback for the specified flavor of Tasks RCU. 322 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, 323 struct rcu_tasks *rtp) 324 { 325 int chosen_cpu; 326 unsigned long flags; 327 bool havekthread = smp_load_acquire(&rtp->kthread_ptr); 328 int ideal_cpu; 329 unsigned long j; 330 bool needadjust = false; 331 bool needwake; 332 struct rcu_tasks_percpu *rtpcp; 333 334 rhp->next = NULL; 335 rhp->func = func; 336 local_irq_save(flags); 337 rcu_read_lock(); 338 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); 339 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask); 340 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); 341 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled. 342 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. 
343 j = jiffies; 344 if (rtpcp->rtp_jiffies != j) { 345 rtpcp->rtp_jiffies = j; 346 rtpcp->rtp_n_lock_retries = 0; 347 } 348 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim && 349 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) 350 needadjust = true; // Defer adjustment to avoid deadlock. 351 } 352 // Queuing callbacks before initialization not yet supported. 353 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist))) 354 rcu_segcblist_init(&rtpcp->cblist); 355 needwake = (func == wakeme_after_rcu) || 356 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim); 357 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) { 358 if (rtp->lazy_jiffies) 359 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); 360 else 361 needwake = rcu_segcblist_empty(&rtpcp->cblist); 362 } 363 if (needwake) 364 rtpcp->urgent_gp = 3; 365 rcu_segcblist_enqueue(&rtpcp->cblist, rhp); 366 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 367 if (unlikely(needadjust)) { 368 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); 369 if (rtp->percpu_enqueue_lim != nr_cpu_ids) { 370 WRITE_ONCE(rtp->percpu_enqueue_shift, 0); 371 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids); 372 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); 373 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); 374 } 375 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); 376 } 377 rcu_read_unlock(); 378 /* We can't create the thread unless interrupts are enabled. */ 379 if (needwake && READ_ONCE(rtp->kthread_ptr)) 380 irq_work_queue(&rtpcp->rtp_irq_work); 381 } 382 383 // RCU callback function for rcu_barrier_tasks_generic(). 384 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) 385 { 386 struct rcu_tasks *rtp; 387 struct rcu_tasks_percpu *rtpcp; 388 389 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); 390 rtp = rtpcp->rtpp; 391 if (atomic_dec_and_test(&rtp->barrier_q_count)) 392 complete(&rtp->barrier_q_completion); 393 } 394 395 // Wait for all in-flight callbacks for the specified RCU Tasks flavor. 396 // Operates in a manner similar to rcu_barrier(). 397 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp) 398 { 399 int cpu; 400 unsigned long flags; 401 struct rcu_tasks_percpu *rtpcp; 402 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); 403 404 mutex_lock(&rtp->barrier_q_mutex); 405 if (rcu_seq_done(&rtp->barrier_q_seq, s)) { 406 smp_mb(); 407 mutex_unlock(&rtp->barrier_q_mutex); 408 return; 409 } 410 rcu_seq_start(&rtp->barrier_q_seq); 411 init_completion(&rtp->barrier_q_completion); 412 atomic_set(&rtp->barrier_q_count, 2); 413 for_each_possible_cpu(cpu) { 414 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) 415 break; 416 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 417 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; 418 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 419 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head)) 420 atomic_inc(&rtp->barrier_q_count); 421 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 422 } 423 if (atomic_sub_and_test(2, &rtp->barrier_q_count)) 424 complete(&rtp->barrier_q_completion); 425 wait_for_completion(&rtp->barrier_q_completion); 426 rcu_seq_end(&rtp->barrier_q_seq); 427 mutex_unlock(&rtp->barrier_q_mutex); 428 } 429 430 // Advance callbacks and indicate whether either a grace period or 431 // callback invocation is needed. 
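// The return value is a bitmask: 0x2 requests a new grace period and 0x1
// requests callback invocation.  rcu_tasks_one_gp() tests only the 0x2 bit,
// and any nonzero value ends the grace-period kthread's wait.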
432 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) 433 { 434 int cpu; 435 int dequeue_limit; 436 unsigned long flags; 437 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq); 438 long n; 439 long ncbs = 0; 440 long ncbsnz = 0; 441 int needgpcb = 0; 442 443 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim); 444 for (cpu = 0; cpu < dequeue_limit; cpu++) { 445 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 446 447 /* Advance and accelerate any new callbacks. */ 448 if (!rcu_segcblist_n_cbs(&rtpcp->cblist)) 449 continue; 450 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 451 // Should we shrink down to a single callback queue? 452 n = rcu_segcblist_n_cbs(&rtpcp->cblist); 453 if (n) { 454 ncbs += n; 455 if (cpu > 0) 456 ncbsnz += n; 457 } 458 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); 459 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); 460 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) { 461 if (rtp->lazy_jiffies) 462 rtpcp->urgent_gp--; 463 needgpcb |= 0x3; 464 } else if (rcu_segcblist_empty(&rtpcp->cblist)) { 465 rtpcp->urgent_gp = 0; 466 } 467 if (rcu_segcblist_ready_cbs(&rtpcp->cblist)) 468 needgpcb |= 0x1; 469 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 470 } 471 472 // Shrink down to a single callback queue if appropriate. 473 // This is done in two stages: (1) If there are no more than 474 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other 475 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period, 476 // if there has not been an increase in callbacks, limit dequeuing 477 // to CPU 0. Note the matching RCU read-side critical section in 478 // call_rcu_tasks_generic(). 479 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) { 480 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); 481 if (rtp->percpu_enqueue_lim > 1) { 482 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids)); 483 smp_store_release(&rtp->percpu_enqueue_lim, 1); 484 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); 485 gpdone = false; 486 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); 487 } 488 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); 489 } 490 if (rcu_task_cb_adjust && !ncbsnz && gpdone) { 491 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); 492 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { 493 WRITE_ONCE(rtp->percpu_dequeue_lim, 1); 494 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); 495 } 496 if (rtp->percpu_dequeue_lim == 1) { 497 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) { 498 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 499 500 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist)); 501 } 502 } 503 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); 504 } 505 506 return needgpcb; 507 } 508 509 // Advance callbacks and invoke any that are ready. 510 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) 511 { 512 int cpu; 513 int cpunext; 514 int cpuwq; 515 unsigned long flags; 516 int len; 517 struct rcu_head *rhp; 518 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 519 struct rcu_tasks_percpu *rtpcp_next; 520 521 cpu = rtpcp->cpu; 522 cpunext = cpu * 2 + 1; 523 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { 524 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); 525 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? 
cpunext : WORK_CPU_UNBOUND; 526 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); 527 cpunext++; 528 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { 529 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); 530 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND; 531 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); 532 } 533 } 534 535 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu)) 536 return; 537 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 538 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); 539 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); 540 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 541 len = rcl.len; 542 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { 543 debug_rcu_head_callback(rhp); 544 local_bh_disable(); 545 rhp->func(rhp); 546 local_bh_enable(); 547 cond_resched(); 548 } 549 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 550 rcu_segcblist_add_len(&rtpcp->cblist, -len); 551 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); 552 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 553 } 554 555 // Workqueue flood to advance callbacks and invoke any that are ready. 556 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp) 557 { 558 struct rcu_tasks *rtp; 559 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work); 560 561 rtp = rtpcp->rtpp; 562 rcu_tasks_invoke_cbs(rtp, rtpcp); 563 } 564 565 // Wait for one grace period. 566 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) 567 { 568 int needgpcb; 569 570 mutex_lock(&rtp->tasks_gp_mutex); 571 572 // If there were none, wait a bit and start over. 573 if (unlikely(midboot)) { 574 needgpcb = 0x2; 575 } else { 576 mutex_unlock(&rtp->tasks_gp_mutex); 577 set_tasks_gp_state(rtp, RTGS_WAIT_CBS); 578 rcuwait_wait_event(&rtp->cbs_wait, 579 (needgpcb = rcu_tasks_need_gpcb(rtp)), 580 TASK_IDLE); 581 mutex_lock(&rtp->tasks_gp_mutex); 582 } 583 584 if (needgpcb & 0x2) { 585 // Wait for one grace period. 586 set_tasks_gp_state(rtp, RTGS_WAIT_GP); 587 rtp->gp_start = jiffies; 588 rcu_seq_start(&rtp->tasks_gp_seq); 589 rtp->gp_func(rtp); 590 rcu_seq_end(&rtp->tasks_gp_seq); 591 } 592 593 // Invoke callbacks. 594 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); 595 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); 596 mutex_unlock(&rtp->tasks_gp_mutex); 597 } 598 599 // RCU-tasks kthread that detects grace periods and invokes callbacks. 600 static int __noreturn rcu_tasks_kthread(void *arg) 601 { 602 int cpu; 603 struct rcu_tasks *rtp = arg; 604 605 for_each_possible_cpu(cpu) { 606 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 607 608 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0); 609 rtpcp->urgent_gp = 1; 610 } 611 612 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ 613 housekeeping_affine(current, HK_TYPE_RCU); 614 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start! 615 616 /* 617 * Each pass through the following loop makes one check for 618 * newly arrived callbacks, and, if there are some, waits for 619 * one RCU-tasks grace period and then invokes the callbacks. 620 * This loop is terminated by the system going down. ;-) 621 */ 622 for (;;) { 623 // Wait for one grace period and invoke any callbacks 624 // that are ready. 625 rcu_tasks_one_gp(rtp, false); 626 627 // Paranoid sleep to keep this from entering a tight loop. 
628 schedule_timeout_idle(rtp->gp_sleep); 629 } 630 } 631 632 // Wait for a grace period for the specified flavor of Tasks RCU. 633 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) 634 { 635 /* Complain if the scheduler has not started. */ 636 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, 637 "synchronize_%s() called too soon", rtp->name)) 638 return; 639 640 // If the grace-period kthread is running, use it. 641 if (READ_ONCE(rtp->kthread_ptr)) { 642 wait_rcu_gp(rtp->call_func); 643 return; 644 } 645 rcu_tasks_one_gp(rtp, true); 646 } 647 648 /* Spawn RCU-tasks grace-period kthread. */ 649 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) 650 { 651 struct task_struct *t; 652 653 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); 654 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) 655 return; 656 smp_mb(); /* Ensure others see full kthread. */ 657 } 658 659 #ifndef CONFIG_TINY_RCU 660 661 /* 662 * Print any non-default Tasks RCU settings. 663 */ 664 static void __init rcu_tasks_bootup_oddness(void) 665 { 666 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) 667 int rtsimc; 668 669 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT) 670 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout); 671 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10); 672 if (rtsimc != rcu_task_stall_info_mult) { 673 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc); 674 rcu_task_stall_info_mult = rtsimc; 675 } 676 #endif /* #ifdef CONFIG_TASKS_RCU */ 677 #ifdef CONFIG_TASKS_RCU 678 pr_info("\tTrampoline variant of Tasks RCU enabled.\n"); 679 #endif /* #ifdef CONFIG_TASKS_RCU */ 680 #ifdef CONFIG_TASKS_RUDE_RCU 681 pr_info("\tRude variant of Tasks RCU enabled.\n"); 682 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ 683 #ifdef CONFIG_TASKS_TRACE_RCU 684 pr_info("\tTracing variant of Tasks RCU enabled.\n"); 685 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ 686 } 687 688 #endif /* #ifndef CONFIG_TINY_RCU */ 689 690 #ifndef CONFIG_TINY_RCU 691 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. 
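 * The single line printed below reads, in order: flavor kthread name,
 * grace-period state name and number, jiffies since the last state
 * transition, current grace-period sequence number (g:), IPI send
 * failures/IPIs sent (i:), four flag characters ("k" = GP kthread spawned,
 * "C" = callbacks queued on some CPU, "u" = some CPU has an urgent grace
 * period requested, "U" = some CPU has both; a "." means the condition is
 * false), the lazy-callback delay in jiffies (l:), and a caller-supplied
 * suffix.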
*/ 692 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) 693 { 694 int cpu; 695 bool havecbs = false; 696 bool haveurgent = false; 697 bool haveurgentcbs = false; 698 699 for_each_possible_cpu(cpu) { 700 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 701 702 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) 703 havecbs = true; 704 if (data_race(rtpcp->urgent_gp)) 705 haveurgent = true; 706 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp)) 707 haveurgentcbs = true; 708 if (havecbs && haveurgent && haveurgentcbs) 709 break; 710 } 711 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n", 712 rtp->kname, 713 tasks_gp_state_getname(rtp), data_race(rtp->gp_state), 714 jiffies - data_race(rtp->gp_jiffies), 715 data_race(rcu_seq_current(&rtp->tasks_gp_seq)), 716 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), 717 ".k"[!!data_race(rtp->kthread_ptr)], 718 ".C"[havecbs], 719 ".u"[haveurgent], 720 ".U"[haveurgentcbs], 721 rtp->lazy_jiffies, 722 s); 723 } 724 #endif // #ifndef CONFIG_TINY_RCU 725 726 static void exit_tasks_rcu_finish_trace(struct task_struct *t); 727 728 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) 729 730 //////////////////////////////////////////////////////////////////////// 731 // 732 // Shared code between task-list-scanning variants of Tasks RCU. 733 734 /* Wait for one RCU-tasks grace period. */ 735 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) 736 { 737 struct task_struct *g; 738 int fract; 739 LIST_HEAD(holdouts); 740 unsigned long j; 741 unsigned long lastinfo; 742 unsigned long lastreport; 743 bool reported = false; 744 int rtsi; 745 struct task_struct *t; 746 747 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); 748 rtp->pregp_func(&holdouts); 749 750 /* 751 * There were callbacks, so we need to wait for an RCU-tasks 752 * grace period. Start off by scanning the task list for tasks 753 * that are not already voluntarily blocked. Mark these tasks 754 * and make a list of them in holdouts. 755 */ 756 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); 757 if (rtp->pertask_func) { 758 rcu_read_lock(); 759 for_each_process_thread(g, t) 760 rtp->pertask_func(t, &holdouts); 761 rcu_read_unlock(); 762 } 763 764 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); 765 rtp->postscan_func(&holdouts); 766 767 /* 768 * Each pass through the following loop scans the list of holdout 769 * tasks, removing any that are no longer holdouts. When the list 770 * is empty, we are done. 771 */ 772 lastreport = jiffies; 773 lastinfo = lastreport; 774 rtsi = READ_ONCE(rcu_task_stall_info); 775 776 // Start off with initial wait and slowly back off to 1 HZ wait. 
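        // (For example, the RCU Tasks flavor below sets init_fract = HZ / 10,
        // so with HZ=1000 the first sleep is 100 jiffies; each subsequent pass
        // adds one jiffy until the per-pass wait reaches the HZ cap of one
        // second.)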
777 fract = rtp->init_fract; 778 779 while (!list_empty(&holdouts)) { 780 ktime_t exp; 781 bool firstreport; 782 bool needreport; 783 int rtst; 784 785 // Slowly back off waiting for holdouts 786 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); 787 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { 788 schedule_timeout_idle(fract); 789 } else { 790 exp = jiffies_to_nsecs(fract); 791 __set_current_state(TASK_IDLE); 792 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD); 793 } 794 795 if (fract < HZ) 796 fract++; 797 798 rtst = READ_ONCE(rcu_task_stall_timeout); 799 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); 800 if (needreport) { 801 lastreport = jiffies; 802 reported = true; 803 } 804 firstreport = true; 805 WARN_ON(signal_pending(current)); 806 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); 807 rtp->holdouts_func(&holdouts, needreport, &firstreport); 808 809 // Print pre-stall informational messages if needed. 810 j = jiffies; 811 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) { 812 lastinfo = j; 813 rtsi = rtsi * rcu_task_stall_info_mult; 814 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n", 815 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); 816 } 817 } 818 819 set_tasks_gp_state(rtp, RTGS_POST_GP); 820 rtp->postgp_func(rtp); 821 } 822 823 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ 824 825 #ifdef CONFIG_TASKS_RCU 826 827 //////////////////////////////////////////////////////////////////////// 828 // 829 // Simple variant of RCU whose quiescent states are voluntary context 830 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle. 831 // As such, grace periods can take one good long time. There are no 832 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() 833 // because this implementation is intended to get the system into a safe 834 // state for some of the manipulations involved in tracing and the like. 835 // Finally, this implementation does not support high call_rcu_tasks() 836 // rates from multiple CPUs. If this is required, per-CPU callback lists 837 // will be needed. 838 // 839 // The implementation uses rcu_tasks_wait_gp(), which relies on function 840 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread() 841 // function sets these function pointers up so that rcu_tasks_wait_gp() 842 // invokes these functions in this order: 843 // 844 // rcu_tasks_pregp_step(): 845 // Invokes synchronize_rcu() in order to wait for all in-flight 846 // t->on_rq and t->nvcsw transitions to complete. This works because 847 // all such transitions are carried out with interrupts disabled. 848 // rcu_tasks_pertask(), invoked on every non-idle task: 849 // For every runnable non-idle task other than the current one, use 850 // get_task_struct() to pin down that task, snapshot that task's 851 // number of voluntary context switches, and add that task to the 852 // holdout list. 853 // rcu_tasks_postscan(): 854 // Invoke synchronize_srcu() to ensure that all tasks that were 855 // in the process of exiting (and which thus might not know to 856 // synchronize with this RCU Tasks grace period) have completed 857 // exiting. 858 // check_all_holdout_tasks(), repeatedly until holdout list is empty: 859 // Scans the holdout list, attempting to identify a quiescent state 860 // for each task on the list. If there is a quiescent state, the 861 // corresponding task is removed from the holdout list. 
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
        /*
         * Wait for all pre-existing t->on_rq and t->nvcsw transitions
         * to complete.  Invoking synchronize_rcu() suffices because all
         * these transitions occur with interrupts disabled.  Without this
         * synchronize_rcu(), a read-side critical section that started
         * before the grace period might be incorrectly seen as having
         * started after the grace period.
         *
         * This synchronize_rcu() also dispenses with the need for a
         * memory barrier on the first store to t->rcu_tasks_holdout,
         * as it forces the store to happen after the beginning of the
         * grace period.
         */
        synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
        if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
                get_task_struct(t);
                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                WRITE_ONCE(t->rcu_tasks_holdout, true);
                list_add(&t->rcu_tasks_holdout_list, hop);
        }
}

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
        int rtsi = READ_ONCE(rcu_task_stall_info);

        if (!IS_ENABLED(CONFIG_TINY_RCU)) {
                tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
                add_timer(&tasks_rcu_exit_srcu_stall_timer);
        }

        /*
         * Exiting tasks may escape the tasklist scan. Those are vulnerable
         * until their final schedule() with TASK_DEAD state. To cope with
         * this, divide the fragile exit path part in two intersecting
         * read side critical sections:
         *
         * 1) An _SRCU_ read side starting before calling exit_notify(),
         *    which may remove the task from the tasklist, and ending after
         *    the final preempt_disable() call in do_exit().
         *
         * 2) An _RCU_ read side starting with the final preempt_disable()
         *    call in do_exit() and ending with the final call to schedule()
         *    with TASK_DEAD state.
         *
         * This handles part 1). And postgp will handle part 2) with a
         * call to synchronize_rcu().
         */
        synchronize_srcu(&tasks_rcu_exit_srcu);

        if (!IS_ENABLED(CONFIG_TINY_RCU))
                del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
}

/* See if tasks are still holding out, complain if so.
 */
static void check_holdout_task(struct task_struct *t,
                               bool needreport, bool *firstreport)
{
        int cpu;

        if (!READ_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
            !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
        }
        rcu_request_urgent_qs_task(t);
        if (!needreport)
                return;
        if (*firstreport) {
                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                *firstreport = false;
        }
        cpu = task_cpu(t);
        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                 t, ".I"[is_idle_task(t)],
                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                 t->rcu_tasks_idle_cpu, cpu);
        sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
                                    bool needreport, bool *firstreport)
{
        struct task_struct *t, *t1;

        list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
                check_holdout_task(t, needreport, firstreport);
                cond_resched();
        }
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
        /*
         * Because ->on_rq and ->nvcsw are not guaranteed to have full
         * memory barriers prior to them in the schedule() path, memory
         * reordering on other CPUs could cause their RCU-tasks read-side
         * critical sections to extend past the end of the grace period.
         * However, because these ->nvcsw updates are carried out with
         * interrupts disabled, we can use synchronize_rcu() to force the
         * needed ordering on all such CPUs.
         *
         * This synchronize_rcu() also confines all ->rcu_tasks_holdout
         * accesses to be within the grace period, avoiding the need for
         * memory barriers for ->rcu_tasks_holdout accesses.
         *
         * In addition, this synchronize_rcu() waits for exiting tasks
         * to complete their final preempt_disable() region of execution,
         * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
         * enforcing the whole region before tasklist removal until
         * the final schedule() with TASK_DEAD state to be an RCU TASKS
         * read side critical section.
         */
        synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{
#ifndef CONFIG_TINY_RCU
        int rtsi;

        rtsi = READ_ONCE(rcu_task_stall_info);
        pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
                __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
                tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
        pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
        tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
        add_timer(&tasks_rcu_exit_srcu_stall_timer);
#endif // #ifndef CONFIG_TINY_RCU
}

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
1033 * @func: actual callback function to be invoked after the grace period 1034 * 1035 * The callback function will be invoked some time after a full grace 1036 * period elapses, in other words after all currently executing RCU 1037 * read-side critical sections have completed. call_rcu_tasks() assumes 1038 * that the read-side critical sections end at a voluntary context 1039 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, 1040 * or transition to usermode execution. As such, there are no read-side 1041 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because 1042 * this primitive is intended to determine that all tasks have passed 1043 * through a safe state, not so much for data-structure synchronization. 1044 * 1045 * See the description of call_rcu() for more detailed information on 1046 * memory ordering guarantees. 1047 */ 1048 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) 1049 { 1050 call_rcu_tasks_generic(rhp, func, &rcu_tasks); 1051 } 1052 EXPORT_SYMBOL_GPL(call_rcu_tasks); 1053 1054 /** 1055 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. 1056 * 1057 * Control will return to the caller some time after a full rcu-tasks 1058 * grace period has elapsed, in other words after all currently 1059 * executing rcu-tasks read-side critical sections have elapsed. These 1060 * read-side critical sections are delimited by calls to schedule(), 1061 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls 1062 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). 1063 * 1064 * This is a very specialized primitive, intended only for a few uses in 1065 * tracing and other situations requiring manipulation of function 1066 * preambles and profiling hooks. The synchronize_rcu_tasks() function 1067 * is not (yet) intended for heavy use from multiple CPUs. 1068 * 1069 * See the description of synchronize_rcu() for more detailed information 1070 * on memory ordering guarantees. 1071 */ 1072 void synchronize_rcu_tasks(void) 1073 { 1074 synchronize_rcu_tasks_generic(&rcu_tasks); 1075 } 1076 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); 1077 1078 /** 1079 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. 1080 * 1081 * Although the current implementation is guaranteed to wait, it is not 1082 * obligated to, for example, if there are no pending callbacks. 
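 *
 * A minimal usage sketch (the struct my_obj, my_free_cb(), and my_unload()
 * names are hypothetical, not part of this file): an updater that frees
 * objects via call_rcu_tasks() can drain all such callbacks at unload time:
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	// Update side: call_rcu_tasks(&obj->rh, my_free_cb);
 *
 *	static void my_unload(void)
 *	{
 *		rcu_barrier_tasks();	// All queued my_free_cb() calls have run.
 *	}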
1083 */ 1084 void rcu_barrier_tasks(void) 1085 { 1086 rcu_barrier_tasks_generic(&rcu_tasks); 1087 } 1088 EXPORT_SYMBOL_GPL(rcu_barrier_tasks); 1089 1090 static int rcu_tasks_lazy_ms = -1; 1091 module_param(rcu_tasks_lazy_ms, int, 0444); 1092 1093 static int __init rcu_spawn_tasks_kthread(void) 1094 { 1095 cblist_init_generic(&rcu_tasks); 1096 rcu_tasks.gp_sleep = HZ / 10; 1097 rcu_tasks.init_fract = HZ / 10; 1098 if (rcu_tasks_lazy_ms >= 0) 1099 rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms); 1100 rcu_tasks.pregp_func = rcu_tasks_pregp_step; 1101 rcu_tasks.pertask_func = rcu_tasks_pertask; 1102 rcu_tasks.postscan_func = rcu_tasks_postscan; 1103 rcu_tasks.holdouts_func = check_all_holdout_tasks; 1104 rcu_tasks.postgp_func = rcu_tasks_postgp; 1105 rcu_spawn_tasks_kthread_generic(&rcu_tasks); 1106 return 0; 1107 } 1108 1109 #if !defined(CONFIG_TINY_RCU) 1110 void show_rcu_tasks_classic_gp_kthread(void) 1111 { 1112 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); 1113 } 1114 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); 1115 #endif // !defined(CONFIG_TINY_RCU) 1116 1117 struct task_struct *get_rcu_tasks_gp_kthread(void) 1118 { 1119 return rcu_tasks.kthread_ptr; 1120 } 1121 EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread); 1122 1123 /* 1124 * Contribute to protect against tasklist scan blind spot while the 1125 * task is exiting and may be removed from the tasklist. See 1126 * corresponding synchronize_srcu() for further details. 1127 */ 1128 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) 1129 { 1130 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); 1131 } 1132 1133 /* 1134 * Contribute to protect against tasklist scan blind spot while the 1135 * task is exiting and may be removed from the tasklist. See 1136 * corresponding synchronize_srcu() for further details. 1137 */ 1138 void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu) 1139 { 1140 struct task_struct *t = current; 1141 1142 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); 1143 } 1144 1145 /* 1146 * Contribute to protect against tasklist scan blind spot while the 1147 * task is exiting and may be removed from the tasklist. See 1148 * corresponding synchronize_srcu() for further details. 1149 */ 1150 void exit_tasks_rcu_finish(void) 1151 { 1152 exit_tasks_rcu_stop(); 1153 exit_tasks_rcu_finish_trace(current); 1154 } 1155 1156 #else /* #ifdef CONFIG_TASKS_RCU */ 1157 void exit_tasks_rcu_start(void) { } 1158 void exit_tasks_rcu_stop(void) { } 1159 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } 1160 #endif /* #else #ifdef CONFIG_TASKS_RCU */ 1161 1162 #ifdef CONFIG_TASKS_RUDE_RCU 1163 1164 //////////////////////////////////////////////////////////////////////// 1165 // 1166 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of 1167 // passing an empty function to schedule_on_each_cpu(). This approach 1168 // provides an asynchronous call_rcu_tasks_rude() API and batching of 1169 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API. 1170 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide 1171 // and induces otherwise unnecessary context switches on all online CPUs, 1172 // whether idle or not. 1173 // 1174 // Callback handling is provided by the rcu_tasks_kthread() function. 1175 // 1176 // Ordering is provided by the scheduler's context-switch code. 1177 1178 // Empty function to allow workqueues to force a context switch. 
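//
// For illustration (assumed usage, not from this file): a rude reader is
// simply a region of code that never schedules, for example:
//
//	preempt_disable();
//	/* briefly execute code in a trampoline that is about to be freed */
//	preempt_enable();
//
// Roughly speaking, schedule_on_each_cpu() below cannot return until every
// CPU has context-switched to run its workqueue item, and hence has left
// any such region that was executing when the grace period began.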
1179 static void rcu_tasks_be_rude(struct work_struct *work) 1180 { 1181 } 1182 1183 // Wait for one rude RCU-tasks grace period. 1184 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) 1185 { 1186 rtp->n_ipis += cpumask_weight(cpu_online_mask); 1187 schedule_on_each_cpu(rcu_tasks_be_rude); 1188 } 1189 1190 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); 1191 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, 1192 "RCU Tasks Rude"); 1193 1194 /** 1195 * call_rcu_tasks_rude() - Queue a callback rude task-based grace period 1196 * @rhp: structure to be used for queueing the RCU updates. 1197 * @func: actual callback function to be invoked after the grace period 1198 * 1199 * The callback function will be invoked some time after a full grace 1200 * period elapses, in other words after all currently executing RCU 1201 * read-side critical sections have completed. call_rcu_tasks_rude() 1202 * assumes that the read-side critical sections end at context switch, 1203 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as 1204 * usermode execution is schedulable). As such, there are no read-side 1205 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because 1206 * this primitive is intended to determine that all tasks have passed 1207 * through a safe state, not so much for data-structure synchronization. 1208 * 1209 * See the description of call_rcu() for more detailed information on 1210 * memory ordering guarantees. 1211 */ 1212 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) 1213 { 1214 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude); 1215 } 1216 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); 1217 1218 /** 1219 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period 1220 * 1221 * Control will return to the caller some time after a rude rcu-tasks 1222 * grace period has elapsed, in other words after all currently 1223 * executing rcu-tasks read-side critical sections have elapsed. These 1224 * read-side critical sections are delimited by calls to schedule(), 1225 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable 1226 * context), and (in theory, anyway) cond_resched(). 1227 * 1228 * This is a very specialized primitive, intended only for a few uses in 1229 * tracing and other situations requiring manipulation of function preambles 1230 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not 1231 * (yet) intended for heavy use from multiple CPUs. 1232 * 1233 * See the description of synchronize_rcu() for more detailed information 1234 * on memory ordering guarantees. 1235 */ 1236 void synchronize_rcu_tasks_rude(void) 1237 { 1238 synchronize_rcu_tasks_generic(&rcu_tasks_rude); 1239 } 1240 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); 1241 1242 /** 1243 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. 1244 * 1245 * Although the current implementation is guaranteed to wait, it is not 1246 * obligated to, for example, if there are no pending callbacks. 
1247 */ 1248 void rcu_barrier_tasks_rude(void) 1249 { 1250 rcu_barrier_tasks_generic(&rcu_tasks_rude); 1251 } 1252 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); 1253 1254 int rcu_tasks_rude_lazy_ms = -1; 1255 module_param(rcu_tasks_rude_lazy_ms, int, 0444); 1256 1257 static int __init rcu_spawn_tasks_rude_kthread(void) 1258 { 1259 cblist_init_generic(&rcu_tasks_rude); 1260 rcu_tasks_rude.gp_sleep = HZ / 10; 1261 if (rcu_tasks_rude_lazy_ms >= 0) 1262 rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms); 1263 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); 1264 return 0; 1265 } 1266 1267 #if !defined(CONFIG_TINY_RCU) 1268 void show_rcu_tasks_rude_gp_kthread(void) 1269 { 1270 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, ""); 1271 } 1272 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); 1273 #endif // !defined(CONFIG_TINY_RCU) 1274 1275 struct task_struct *get_rcu_tasks_rude_gp_kthread(void) 1276 { 1277 return rcu_tasks_rude.kthread_ptr; 1278 } 1279 EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread); 1280 1281 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ 1282 1283 //////////////////////////////////////////////////////////////////////// 1284 // 1285 // Tracing variant of Tasks RCU. This variant is designed to be used 1286 // to protect tracing hooks, including those of BPF. This variant 1287 // therefore: 1288 // 1289 // 1. Has explicit read-side markers to allow finite grace periods 1290 // in the face of in-kernel loops for PREEMPT=n builds. 1291 // 1292 // 2. Protects code in the idle loop, exception entry/exit, and 1293 // CPU-hotplug code paths, similar to the capabilities of SRCU. 1294 // 1295 // 3. Avoids expensive read-side instructions, having overhead similar 1296 // to that of Preemptible RCU. 1297 // 1298 // There are of course downsides. For example, the grace-period code 1299 // can send IPIs to CPUs, even when those CPUs are in the idle loop or 1300 // in nohz_full userspace. If needed, these downsides can be at least 1301 // partially remedied. 1302 // 1303 // Perhaps most important, this variant of RCU does not affect the vanilla 1304 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace 1305 // readers can operate from idle, offline, and exception entry/exit in no 1306 // way allows rcu_preempt and rcu_sched readers to also do so. 1307 // 1308 // The implementation uses rcu_tasks_wait_gp(), which relies on function 1309 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() 1310 // function sets these function pointers up so that rcu_tasks_wait_gp() 1311 // invokes these functions in this order: 1312 // 1313 // rcu_tasks_trace_pregp_step(): 1314 // Disables CPU hotplug, adds all currently executing tasks to the 1315 // holdout list, then checks the state of all tasks that blocked 1316 // or were preempted within their current RCU Tasks Trace read-side 1317 // critical section, adding them to the holdout list if appropriate. 1318 // Finally, this function re-enables CPU hotplug. 1319 // The ->pertask_func() pointer is NULL, so there is no per-task processing. 1320 // rcu_tasks_trace_postscan(): 1321 // Invokes synchronize_rcu() to wait for late-stage exiting tasks 1322 // to finish exiting. 1323 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: 1324 // Scans the holdout list, attempting to identify a quiescent state 1325 // for each task on the list. If there is a quiescent state, the 1326 // corresponding task is removed from the holdout list. 
Once this 1327 // list is empty, the grace period has completed. 1328 // rcu_tasks_trace_postgp(): 1329 // Provides the needed full memory barrier and does debug checks. 1330 // 1331 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. 1332 // 1333 // Pre-grace-period update-side code is ordered before the grace period 1334 // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period 1335 // read-side code is ordered before the grace period by atomic operations 1336 // on .b.need_qs flag of each task involved in this process, or by scheduler 1337 // context-switch ordering (for locked-down non-running readers). 1338 1339 // The lockdep state must be outside of #ifdef to be useful. 1340 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1341 static struct lock_class_key rcu_lock_trace_key; 1342 struct lockdep_map rcu_trace_lock_map = 1343 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); 1344 EXPORT_SYMBOL_GPL(rcu_trace_lock_map); 1345 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 1346 1347 #ifdef CONFIG_TASKS_TRACE_RCU 1348 1349 // Record outstanding IPIs to each CPU. No point in sending two... 1350 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); 1351 1352 // The number of detections of task quiescent state relying on 1353 // heavyweight readers executing explicit memory barriers. 1354 static unsigned long n_heavy_reader_attempts; 1355 static unsigned long n_heavy_reader_updates; 1356 static unsigned long n_heavy_reader_ofl_updates; 1357 static unsigned long n_trc_holdouts; 1358 1359 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); 1360 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, 1361 "RCU Tasks Trace"); 1362 1363 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */ 1364 static u8 rcu_ld_need_qs(struct task_struct *t) 1365 { 1366 smp_mb(); // Enforce full grace-period ordering. 1367 return smp_load_acquire(&t->trc_reader_special.b.need_qs); 1368 } 1369 1370 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */ 1371 static void rcu_st_need_qs(struct task_struct *t, u8 v) 1372 { 1373 smp_store_release(&t->trc_reader_special.b.need_qs, v); 1374 smp_mb(); // Enforce full grace-period ordering. 1375 } 1376 1377 /* 1378 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for 1379 * the four-byte operand-size restriction of some platforms. 1380 * Returns the old value, which is often ignored. 1381 */ 1382 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) 1383 { 1384 union rcu_special ret; 1385 union rcu_special trs_old = READ_ONCE(t->trc_reader_special); 1386 union rcu_special trs_new = trs_old; 1387 1388 if (trs_old.b.need_qs != old) 1389 return trs_old.b.need_qs; 1390 trs_new.b.need_qs = new; 1391 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s); 1392 return ret.b.need_qs; 1393 } 1394 EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); 1395 1396 /* 1397 * If we are the last reader, signal the grace-period kthread. 1398 * Also remove from the per-CPU list of blocked tasks. 1399 */ 1400 void rcu_read_unlock_trace_special(struct task_struct *t) 1401 { 1402 unsigned long flags; 1403 struct rcu_tasks_percpu *rtpcp; 1404 union rcu_special trs; 1405 1406 // Open-coded full-word version of rcu_ld_need_qs(). 1407 smp_mb(); // Enforce full grace-period ordering. 1408 trs = smp_load_acquire(&t->trc_reader_special); 1409 1410 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) 1411 smp_mb(); // Pairs with update-side barriers. 
1412 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. 1413 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) { 1414 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, 1415 TRC_NEED_QS_CHECKED); 1416 1417 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result); 1418 } 1419 if (trs.b.blocked) { 1420 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); 1421 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1422 list_del_init(&t->trc_blkd_node); 1423 WRITE_ONCE(t->trc_reader_special.b.blocked, false); 1424 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1425 } 1426 WRITE_ONCE(t->trc_reader_nesting, 0); 1427 } 1428 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); 1429 1430 /* Add a newly blocked reader task to its CPU's list. */ 1431 void rcu_tasks_trace_qs_blkd(struct task_struct *t) 1432 { 1433 unsigned long flags; 1434 struct rcu_tasks_percpu *rtpcp; 1435 1436 local_irq_save(flags); 1437 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu); 1438 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled 1439 t->trc_blkd_cpu = smp_processor_id(); 1440 if (!rtpcp->rtp_blkd_tasks.next) 1441 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); 1442 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); 1443 WRITE_ONCE(t->trc_reader_special.b.blocked, true); 1444 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1445 } 1446 EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); 1447 1448 /* Add a task to the holdout list, if it is not already on the list. */ 1449 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) 1450 { 1451 if (list_empty(&t->trc_holdout_list)) { 1452 get_task_struct(t); 1453 list_add(&t->trc_holdout_list, bhp); 1454 n_trc_holdouts++; 1455 } 1456 } 1457 1458 /* Remove a task from the holdout list, if it is in fact present. */ 1459 static void trc_del_holdout(struct task_struct *t) 1460 { 1461 if (!list_empty(&t->trc_holdout_list)) { 1462 list_del_init(&t->trc_holdout_list); 1463 put_task_struct(t); 1464 n_trc_holdouts--; 1465 } 1466 } 1467 1468 /* IPI handler to check task state. */ 1469 static void trc_read_check_handler(void *t_in) 1470 { 1471 int nesting; 1472 struct task_struct *t = current; 1473 struct task_struct *texp = t_in; 1474 1475 // If the task is no longer running on this CPU, leave. 1476 if (unlikely(texp != t)) 1477 goto reset_ipi; // Already on holdout list, so will check later. 1478 1479 // If the task is not in a read-side critical section, and 1480 // if this is the last reader, awaken the grace-period kthread. 1481 nesting = READ_ONCE(t->trc_reader_nesting); 1482 if (likely(!nesting)) { 1483 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); 1484 goto reset_ipi; 1485 } 1486 // If we are racing with an rcu_read_unlock_trace(), try again later. 1487 if (unlikely(nesting < 0)) 1488 goto reset_ipi; 1489 1490 // Get here if the task is in a read-side critical section. 1491 // Set its state so that it will update state for the grace-period 1492 // kthread upon exit from that critical section. 1493 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); 1494 1495 reset_ipi: 1496 // Allow future IPIs to be sent on CPU and for task. 1497 // Also order this IPI handler against any later manipulations of 1498 // the intended task. 1499 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ 1500 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ 1501 } 1502 1503 /* Callback function for scheduler to check locked-down task. 
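 * Returns 0 if the task's read-side state was resolved (a quiescent state
 * was recorded, or the task was flagged to report its own quiescent state
 * and queued as a holdout), or -EINVAL if the state could not yet be
 * resolved (for example, the task is running without heavyweight readers
 * enabled, or is mid-transition), in which case the caller adds the task
 * to the holdout list and may resort to an IPI.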
*/ 1504 static int trc_inspect_reader(struct task_struct *t, void *bhp_in) 1505 { 1506 struct list_head *bhp = bhp_in; 1507 int cpu = task_cpu(t); 1508 int nesting; 1509 bool ofl = cpu_is_offline(cpu); 1510 1511 if (task_curr(t) && !ofl) { 1512 // If no chance of heavyweight readers, do it the hard way. 1513 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) 1514 return -EINVAL; 1515 1516 // If heavyweight readers are enabled on the remote task, 1517 // we can inspect its state despite its currently running. 1518 // However, we cannot safely change its state. 1519 n_heavy_reader_attempts++; 1520 // Check for "running" idle tasks on offline CPUs. 1521 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) 1522 return -EINVAL; // No quiescent state, do it the hard way. 1523 n_heavy_reader_updates++; 1524 nesting = 0; 1525 } else { 1526 // The task is not running, so C-language access is safe. 1527 nesting = t->trc_reader_nesting; 1528 WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t)); 1529 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) 1530 n_heavy_reader_ofl_updates++; 1531 } 1532 1533 // If not exiting a read-side critical section, mark as checked 1534 // so that the grace-period kthread will remove it from the 1535 // holdout list. 1536 if (!nesting) { 1537 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); 1538 return 0; // In QS, so done. 1539 } 1540 if (nesting < 0) 1541 return -EINVAL; // Reader transitioning, try again later. 1542 1543 // The task is in a read-side critical section, so set up its 1544 // state so that it will update state upon exit from that critical 1545 // section. 1546 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) 1547 trc_add_holdout(t, bhp); 1548 return 0; 1549 } 1550 1551 /* Attempt to extract the state for the specified task. */ 1552 static void trc_wait_for_one_reader(struct task_struct *t, 1553 struct list_head *bhp) 1554 { 1555 int cpu; 1556 1557 // If a previous IPI is still in flight, let it complete. 1558 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI 1559 return; 1560 1561 // The current task had better be in a quiescent state. 1562 if (t == current) { 1563 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); 1564 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); 1565 return; 1566 } 1567 1568 // Attempt to nail down the task for inspection. 1569 get_task_struct(t); 1570 if (!task_call_func(t, trc_inspect_reader, bhp)) { 1571 put_task_struct(t); 1572 return; 1573 } 1574 put_task_struct(t); 1575 1576 // If this task is not yet on the holdout list, then we are in 1577 // an RCU read-side critical section. Otherwise, the invocation of 1578 // trc_add_holdout() that added it to the list did the necessary 1579 // get_task_struct(). Either way, the task cannot be freed out 1580 // from under this code. 1581 1582 // If currently running, send an IPI, either way, add to list. 1583 trc_add_holdout(t, bhp); 1584 if (task_curr(t) && 1585 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { 1586 // The task is currently running, so try IPIing it. 1587 cpu = task_cpu(t); 1588 1589 // If there is already an IPI outstanding, let it happen. 1590 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) 1591 return; 1592 1593 per_cpu(trc_ipi_to_cpu, cpu) = true; 1594 t->trc_ipi_to_cpu = cpu; 1595 rcu_tasks_trace.n_ipis++; 1596 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { 1597 // Just in case there is some other reason for 1598 // failure than the target CPU being offline. 
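			// Whatever the reason for the failure, undo the
			// IPI bookkeeping below so that a later holdout-list
			// scan can retry this task.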
1599 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", 1600 __func__, cpu); 1601 rcu_tasks_trace.n_ipis_fails++; 1602 per_cpu(trc_ipi_to_cpu, cpu) = false; 1603 t->trc_ipi_to_cpu = -1; 1604 } 1605 } 1606 } 1607 1608 /* 1609 * Initialize for first-round processing for the specified task. 1610 * Return false if task is NULL or already taken care of, true otherwise. 1611 */ 1612 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) 1613 { 1614 // During early boot when there is only the one boot CPU, there 1615 // is no idle task for the other CPUs. Also, the grace-period 1616 // kthread is always in a quiescent state. In addition, just return 1617 // if this task is already on the list. 1618 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) 1619 return false; 1620 1621 rcu_st_need_qs(t, 0); 1622 t->trc_ipi_to_cpu = -1; 1623 return true; 1624 } 1625 1626 /* Do first-round processing for the specified task. */ 1627 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) 1628 { 1629 if (rcu_tasks_trace_pertask_prep(t, true)) 1630 trc_wait_for_one_reader(t, hop); 1631 } 1632 1633 /* Initialize for a new RCU-tasks-trace grace period. */ 1634 static void rcu_tasks_trace_pregp_step(struct list_head *hop) 1635 { 1636 LIST_HEAD(blkd_tasks); 1637 int cpu; 1638 unsigned long flags; 1639 struct rcu_tasks_percpu *rtpcp; 1640 struct task_struct *t; 1641 1642 // There shouldn't be any old IPIs, but... 1643 for_each_possible_cpu(cpu) 1644 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); 1645 1646 // Disable CPU hotplug across the CPU scan for the benefit of 1647 // any IPIs that might be needed. This also waits for all readers 1648 // in CPU-hotplug code paths. 1649 cpus_read_lock(); 1650 1651 // These rcu_tasks_trace_pertask_prep() calls are serialized to 1652 // allow safe access to the hop list. 1653 for_each_online_cpu(cpu) { 1654 rcu_read_lock(); 1655 t = cpu_curr_snapshot(cpu); 1656 if (rcu_tasks_trace_pertask_prep(t, true)) 1657 trc_add_holdout(t, hop); 1658 rcu_read_unlock(); 1659 cond_resched_tasks_rcu_qs(); 1660 } 1661 1662 // Only after all running tasks have been accounted for is it 1663 // safe to take care of the tasks that have blocked within their 1664 // current RCU tasks trace read-side critical section. 1665 for_each_possible_cpu(cpu) { 1666 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); 1667 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1668 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks); 1669 while (!list_empty(&blkd_tasks)) { 1670 rcu_read_lock(); 1671 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); 1672 list_del_init(&t->trc_blkd_node); 1673 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); 1674 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1675 rcu_tasks_trace_pertask(t, hop); 1676 rcu_read_unlock(); 1677 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1678 } 1679 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1680 cond_resched_tasks_rcu_qs(); 1681 } 1682 1683 // Re-enable CPU hotplug now that the holdout list is populated. 1684 cpus_read_unlock(); 1685 } 1686 1687 /* 1688 * Do intermediate processing between task and holdout scans. 1689 */ 1690 static void rcu_tasks_trace_postscan(struct list_head *hop) 1691 { 1692 // Wait for late-stage exiting tasks to finish exiting. 1693 // These might have passed the call to exit_tasks_rcu_finish(). 1694 1695 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! 
1696 synchronize_rcu(); 1697 // Any tasks that exit after this point will set 1698 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. 1699 } 1700 1701 /* Communicate task state back to the RCU tasks trace stall warning request. */ 1702 struct trc_stall_chk_rdr { 1703 int nesting; 1704 int ipi_to_cpu; 1705 u8 needqs; 1706 }; 1707 1708 static int trc_check_slow_task(struct task_struct *t, void *arg) 1709 { 1710 struct trc_stall_chk_rdr *trc_rdrp = arg; 1711 1712 if (task_curr(t) && cpu_online(task_cpu(t))) 1713 return false; // It is running, so decline to inspect it. 1714 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); 1715 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); 1716 trc_rdrp->needqs = rcu_ld_need_qs(t); 1717 return true; 1718 } 1719 1720 /* Show the state of a task stalling the current RCU tasks trace GP. */ 1721 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) 1722 { 1723 int cpu; 1724 struct trc_stall_chk_rdr trc_rdr; 1725 bool is_idle_tsk = is_idle_task(t); 1726 1727 if (*firstreport) { 1728 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); 1729 *firstreport = false; 1730 } 1731 cpu = task_cpu(t); 1732 if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) 1733 pr_alert("P%d: %c%c\n", 1734 t->pid, 1735 ".I"[t->trc_ipi_to_cpu >= 0], 1736 ".i"[is_idle_tsk]); 1737 else 1738 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n", 1739 t->pid, 1740 ".I"[trc_rdr.ipi_to_cpu >= 0], 1741 ".i"[is_idle_tsk], 1742 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], 1743 ".B"[!!data_race(t->trc_reader_special.b.blocked)], 1744 trc_rdr.nesting, 1745 " !CN"[trc_rdr.needqs & 0x3], 1746 " ?"[trc_rdr.needqs > 0x3], 1747 cpu, cpu_online(cpu) ? "" : "(offline)"); 1748 sched_show_task(t); 1749 } 1750 1751 /* List stalled IPIs for RCU tasks trace. */ 1752 static void show_stalled_ipi_trace(void) 1753 { 1754 int cpu; 1755 1756 for_each_possible_cpu(cpu) 1757 if (per_cpu(trc_ipi_to_cpu, cpu)) 1758 pr_alert("\tIPI outstanding to CPU %d\n", cpu); 1759 } 1760 1761 /* Do one scan of the holdout list. */ 1762 static void check_all_holdout_tasks_trace(struct list_head *hop, 1763 bool needreport, bool *firstreport) 1764 { 1765 struct task_struct *g, *t; 1766 1767 // Disable CPU hotplug across the holdout list scan for IPIs. 1768 cpus_read_lock(); 1769 1770 list_for_each_entry_safe(t, g, hop, trc_holdout_list) { 1771 // If safe and needed, try to check the current task. 1772 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && 1773 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) 1774 trc_wait_for_one_reader(t, hop); 1775 1776 // If check succeeded, remove this task from the list. 1777 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && 1778 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) 1779 trc_del_holdout(t); 1780 else if (needreport) 1781 show_stalled_task_trace(t, firstreport); 1782 cond_resched_tasks_rcu_qs(); 1783 } 1784 1785 // Re-enable CPU hotplug now that the holdout list scan has completed. 1786 cpus_read_unlock(); 1787 1788 if (needreport) { 1789 if (*firstreport) 1790 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); 1791 show_stalled_ipi_trace(); 1792 } 1793 } 1794 1795 static void rcu_tasks_trace_empty_fn(void *unused) 1796 { 1797 } 1798 1799 /* Wait for grace period to complete and provide ordering. */ 1800 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) 1801 { 1802 int cpu; 1803 1804 // Wait for any lingering IPI handlers to complete. 
Note that
1805 // if a CPU has gone offline or transitioned to userspace in the
1806 // meantime, all IPI handlers should have been drained beforehand.
1807 // Yes, this assumes that CPUs process IPIs in order. If that ever
1808 // changes, there will need to be a recheck and/or timed wait.
1809 for_each_online_cpu(cpu)
1810 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1811 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1812
1813 smp_mb(); // Caller's code must be ordered after wakeup.
1814 // Pairs with pretty much every ordering primitive.
1815 }
1816
1817 /* Report any needed quiescent state for this exiting task. */
1818 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1819 {
1820 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1821
1822 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1823 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1824 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1825 rcu_read_unlock_trace_special(t);
1826 else
1827 WRITE_ONCE(t->trc_reader_nesting, 0);
1828 }
1829
1830 /**
1831 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1832 * @rhp: structure to be used for queueing the RCU updates.
1833 * @func: actual callback function to be invoked after the grace period
1834 *
1835 * The callback function will be invoked some time after a trace rcu-tasks
1836 * grace period elapses, in other words after all currently executing
1837 * trace rcu-tasks read-side critical sections have completed. These
1838 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1839 * and rcu_read_unlock_trace().
1840 *
1841 * See the description of call_rcu() for more detailed information on
1842 * memory ordering guarantees.
1843 */
1844 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1845 {
1846 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1847 }
1848 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
1849
1850 /**
1851 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1852 *
1853 * Control will return to the caller some time after a trace rcu-tasks
1854 * grace period has elapsed, in other words after all currently executing
1855 * trace rcu-tasks read-side critical sections have completed. These read-side
1856 * critical sections are delimited by calls to rcu_read_lock_trace()
1857 * and rcu_read_unlock_trace().
1858 *
1859 * This is a very specialized primitive, intended only for a few uses in
1860 * tracing and other situations requiring manipulation of function preambles
1861 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1862 * (yet) intended for heavy use from multiple CPUs.
1863 *
1864 * See the description of synchronize_rcu() for more detailed information
1865 * on memory ordering guarantees.
1866 */
1867 void synchronize_rcu_tasks_trace(void)
1868 {
1869 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1870 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1871 }
1872 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1873
1874 /**
1875 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1876 *
1877 * Although the current implementation is guaranteed to wait, it is not
1878 * obligated to do so if, for example, there are no pending callbacks.
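 *
 * A minimal usage sketch follows.  The example_* names are hypothetical
 * and are not part of this file: a caller that queues callbacks with
 * call_rcu_tasks_trace() first prevents any new callbacks from being
 * queued, then invokes rcu_barrier_tasks_trace() so that all previously
 * queued callbacks have been invoked before their rcu_head structures
 * and callback functions can safely go away:
 *
 *	static void example_module_exit(void)
 *	{
 *		example_stop_queueing_callbacks();
 *		rcu_barrier_tasks_trace();
 *		// All previously queued callbacks have now been invoked.
 *	}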
1879 */ 1880 void rcu_barrier_tasks_trace(void) 1881 { 1882 rcu_barrier_tasks_generic(&rcu_tasks_trace); 1883 } 1884 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); 1885 1886 int rcu_tasks_trace_lazy_ms = -1; 1887 module_param(rcu_tasks_trace_lazy_ms, int, 0444); 1888 1889 static int __init rcu_spawn_tasks_trace_kthread(void) 1890 { 1891 cblist_init_generic(&rcu_tasks_trace); 1892 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { 1893 rcu_tasks_trace.gp_sleep = HZ / 10; 1894 rcu_tasks_trace.init_fract = HZ / 10; 1895 } else { 1896 rcu_tasks_trace.gp_sleep = HZ / 200; 1897 if (rcu_tasks_trace.gp_sleep <= 0) 1898 rcu_tasks_trace.gp_sleep = 1; 1899 rcu_tasks_trace.init_fract = HZ / 200; 1900 if (rcu_tasks_trace.init_fract <= 0) 1901 rcu_tasks_trace.init_fract = 1; 1902 } 1903 if (rcu_tasks_trace_lazy_ms >= 0) 1904 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms); 1905 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; 1906 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; 1907 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; 1908 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; 1909 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); 1910 return 0; 1911 } 1912 1913 #if !defined(CONFIG_TINY_RCU) 1914 void show_rcu_tasks_trace_gp_kthread(void) 1915 { 1916 char buf[64]; 1917 1918 sprintf(buf, "N%lu h:%lu/%lu/%lu", 1919 data_race(n_trc_holdouts), 1920 data_race(n_heavy_reader_ofl_updates), 1921 data_race(n_heavy_reader_updates), 1922 data_race(n_heavy_reader_attempts)); 1923 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); 1924 } 1925 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); 1926 #endif // !defined(CONFIG_TINY_RCU) 1927 1928 struct task_struct *get_rcu_tasks_trace_gp_kthread(void) 1929 { 1930 return rcu_tasks_trace.kthread_ptr; 1931 } 1932 EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); 1933 1934 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ 1935 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } 1936 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ 1937 1938 #ifndef CONFIG_TINY_RCU 1939 void show_rcu_tasks_gp_kthreads(void) 1940 { 1941 show_rcu_tasks_classic_gp_kthread(); 1942 show_rcu_tasks_rude_gp_kthread(); 1943 show_rcu_tasks_trace_gp_kthread(); 1944 } 1945 #endif /* #ifndef CONFIG_TINY_RCU */ 1946 1947 #ifdef CONFIG_PROVE_RCU 1948 struct rcu_tasks_test_desc { 1949 struct rcu_head rh; 1950 const char *name; 1951 bool notrun; 1952 unsigned long runstart; 1953 }; 1954 1955 static struct rcu_tasks_test_desc tests[] = { 1956 { 1957 .name = "call_rcu_tasks()", 1958 /* If not defined, the test is skipped. */ 1959 .notrun = IS_ENABLED(CONFIG_TASKS_RCU), 1960 }, 1961 { 1962 .name = "call_rcu_tasks_rude()", 1963 /* If not defined, the test is skipped. */ 1964 .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU), 1965 }, 1966 { 1967 .name = "call_rcu_tasks_trace()", 1968 /* If not defined, the test is skipped. 
*/ 1969 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU) 1970 } 1971 }; 1972 1973 static void test_rcu_tasks_callback(struct rcu_head *rhp) 1974 { 1975 struct rcu_tasks_test_desc *rttd = 1976 container_of(rhp, struct rcu_tasks_test_desc, rh); 1977 1978 pr_info("Callback from %s invoked.\n", rttd->name); 1979 1980 rttd->notrun = false; 1981 } 1982 1983 static void rcu_tasks_initiate_self_tests(void) 1984 { 1985 #ifdef CONFIG_TASKS_RCU 1986 pr_info("Running RCU Tasks wait API self tests\n"); 1987 tests[0].runstart = jiffies; 1988 synchronize_rcu_tasks(); 1989 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); 1990 #endif 1991 1992 #ifdef CONFIG_TASKS_RUDE_RCU 1993 pr_info("Running RCU Tasks Rude wait API self tests\n"); 1994 tests[1].runstart = jiffies; 1995 synchronize_rcu_tasks_rude(); 1996 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); 1997 #endif 1998 1999 #ifdef CONFIG_TASKS_TRACE_RCU 2000 pr_info("Running RCU Tasks Trace wait API self tests\n"); 2001 tests[2].runstart = jiffies; 2002 synchronize_rcu_tasks_trace(); 2003 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); 2004 #endif 2005 } 2006 2007 /* 2008 * Return: 0 - test passed 2009 * 1 - test failed, but have not timed out yet 2010 * -1 - test failed and timed out 2011 */ 2012 static int rcu_tasks_verify_self_tests(void) 2013 { 2014 int ret = 0; 2015 int i; 2016 unsigned long bst = rcu_task_stall_timeout; 2017 2018 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT) 2019 bst = RCU_TASK_BOOT_STALL_TIMEOUT; 2020 for (i = 0; i < ARRAY_SIZE(tests); i++) { 2021 while (tests[i].notrun) { // still hanging. 2022 if (time_after(jiffies, tests[i].runstart + bst)) { 2023 pr_err("%s has failed boot-time tests.\n", tests[i].name); 2024 ret = -1; 2025 break; 2026 } 2027 ret = 1; 2028 break; 2029 } 2030 } 2031 WARN_ON(ret < 0); 2032 2033 return ret; 2034 } 2035 2036 /* 2037 * Repeat the rcu_tasks_verify_self_tests() call once every second until the 2038 * test passes or has timed out. 2039 */ 2040 static struct delayed_work rcu_tasks_verify_work; 2041 static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused) 2042 { 2043 int ret = rcu_tasks_verify_self_tests(); 2044 2045 if (ret <= 0) 2046 return; 2047 2048 /* Test fails but not timed out yet, reschedule another check */ 2049 schedule_delayed_work(&rcu_tasks_verify_work, HZ); 2050 } 2051 2052 static int rcu_tasks_verify_schedule_work(void) 2053 { 2054 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn); 2055 rcu_tasks_verify_work_fn(NULL); 2056 return 0; 2057 } 2058 late_initcall(rcu_tasks_verify_schedule_work); 2059 #else /* #ifdef CONFIG_PROVE_RCU */ 2060 static void rcu_tasks_initiate_self_tests(void) { } 2061 #endif /* #else #ifdef CONFIG_PROVE_RCU */ 2062 2063 void __init rcu_init_tasks_generic(void) 2064 { 2065 #ifdef CONFIG_TASKS_RCU 2066 rcu_spawn_tasks_kthread(); 2067 #endif 2068 2069 #ifdef CONFIG_TASKS_RUDE_RCU 2070 rcu_spawn_tasks_rude_kthread(); 2071 #endif 2072 2073 #ifdef CONFIG_TASKS_TRACE_RCU 2074 rcu_spawn_tasks_trace_kthread(); 2075 #endif 2076 2077 // Run the self-tests. 2078 rcu_tasks_initiate_self_tests(); 2079 } 2080 2081 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ 2082 static inline void rcu_tasks_bootup_oddness(void) {} 2083 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ 2084
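/*
 * Illustrative usage sketch for the RCU Tasks Trace APIs defined above.
 * Everything other than the RCU API calls themselves is hypothetical
 * and not part of the kernel: the example_* names, the descriptor type,
 * and the assumption that updaters are serialized by their caller
 * (hence the bare "true" lockdep argument to rcu_replace_pointer()).
 *
 *	#include <linux/rcupdate_trace.h>
 *	#include <linux/slab.h>
 *
 *	struct example_desc {
 *		void (*func)(void);
 *		struct rcu_head rh;
 *	};
 *
 *	static struct example_desc __rcu *example_slot;
 *
 *	static void example_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct example_desc, rh));
 *	}
 *
 *	// Reader: unlike a vanilla RCU reader, a Tasks Trace reader may be
 *	// preempted and may even block within its critical section.
 *	static void example_reader(void)
 *	{
 *		struct example_desc *d;
 *
 *		rcu_read_lock_trace();
 *		d = rcu_dereference_check(example_slot, rcu_read_lock_trace_held());
 *		if (d)
 *			d->func();
 *		rcu_read_unlock_trace();
 *	}
 *
 *	// Updater, asynchronous: unpublish the descriptor, then defer the
 *	// free until a Tasks Trace grace period has elapsed.
 *	static void example_retire_async(void)
 *	{
 *		struct example_desc *old = rcu_replace_pointer(example_slot, NULL, true);
 *
 *		if (old)
 *			call_rcu_tasks_trace(&old->rh, example_free_cb);
 *	}
 *
 *	// Updater, synchronous: unpublish, wait for all pre-existing
 *	// readers to finish, then free directly.
 *	static void example_retire_sync(void)
 *	{
 *		struct example_desc *old = rcu_replace_pointer(example_slot, NULL, true);
 *
 *		if (old) {
 *			synchronize_rcu_tasks_trace();
 *			kfree(old);
 *		}
 *	}
 */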