1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 /* 3 * Task-based RCU implementations. 4 * 5 * Copyright (C) 2020 Paul E. McKenney 6 */ 7 8 #ifdef CONFIG_TASKS_RCU_GENERIC 9 #include "rcu_segcblist.h" 10 11 //////////////////////////////////////////////////////////////////////// 12 // 13 // Generic data structures. 14 15 struct rcu_tasks; 16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp); 17 typedef void (*pregp_func_t)(struct list_head *hop); 18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop); 19 typedef void (*postscan_func_t)(struct list_head *hop); 20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); 21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp); 22 23 /** 24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. 25 * @cblist: Callback list. 26 * @lock: Lock protecting per-CPU callback list. 27 * @rtp_jiffies: Jiffies counter value for statistics. 28 * @lazy_timer: Timer to unlazify callbacks. 29 * @urgent_gp: Number of additional non-lazy grace periods. 30 * @rtp_n_lock_retries: Rough lock-contention statistic. 31 * @rtp_work: Work queue for invoking callbacks. 32 * @rtp_irq_work: IRQ work queue for deferred wakeups. 33 * @barrier_q_head: RCU callback for barrier operation. 34 * @rtp_blkd_tasks: List of tasks blocked as readers. 35 * @rtp_exit_list: List of tasks in the latter portion of do_exit(). 36 * @cpu: CPU number corresponding to this entry. 37 * @rtpp: Pointer to the rcu_tasks structure. 38 */ 39 struct rcu_tasks_percpu { 40 struct rcu_segcblist cblist; 41 raw_spinlock_t __private lock; 42 unsigned long rtp_jiffies; 43 unsigned long rtp_n_lock_retries; 44 struct timer_list lazy_timer; 45 unsigned int urgent_gp; 46 struct work_struct rtp_work; 47 struct irq_work rtp_irq_work; 48 struct rcu_head barrier_q_head; 49 struct list_head rtp_blkd_tasks; 50 struct list_head rtp_exit_list; 51 int cpu; 52 struct rcu_tasks *rtpp; 53 }; 54 55 /** 56 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. 57 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention. 58 * @cbs_gbl_lock: Lock protecting callback list. 59 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone. 60 * @gp_func: This flavor's grace-period-wait function. 61 * @gp_state: Grace period's most recent state transition (debugging). 62 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. 63 * @init_fract: Initial backoff sleep interval. 64 * @gp_jiffies: Time of last @gp_state transition. 65 * @gp_start: Most recent grace-period start in jiffies. 66 * @tasks_gp_seq: Number of grace periods completed since boot. 67 * @n_ipis: Number of IPIs sent to encourage grace periods to end. 68 * @n_ipis_fails: Number of IPI-send failures. 69 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. 70 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy. 71 * @pregp_func: This flavor's pre-grace-period function (optional). 72 * @pertask_func: This flavor's per-task scan function (optional). 73 * @postscan_func: This flavor's post-task scan function (optional). 74 * @holdouts_func: This flavor's holdout-list scan function (optional). 75 * @postgp_func: This flavor's post-grace-period function (optional). 76 * @call_func: This flavor's call_rcu()-equivalent function. 77 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE). 78 * @rtpcpu: This flavor's rcu_tasks_percpu structure. 79 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. 80 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. 81 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing. 82 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers. 83 * @barrier_q_mutex: Serialize barrier operations. 84 * @barrier_q_count: Number of queues being waited on. 85 * @barrier_q_completion: Barrier wait/wakeup mechanism. 86 * @barrier_q_seq: Sequence number for barrier operations. 87 * @name: This flavor's textual name. 88 * @kname: This flavor's kthread name. 89 */ 90 struct rcu_tasks { 91 struct rcuwait cbs_wait; 92 raw_spinlock_t cbs_gbl_lock; 93 struct mutex tasks_gp_mutex; 94 int gp_state; 95 int gp_sleep; 96 int init_fract; 97 unsigned long gp_jiffies; 98 unsigned long gp_start; 99 unsigned long tasks_gp_seq; 100 unsigned long n_ipis; 101 unsigned long n_ipis_fails; 102 struct task_struct *kthread_ptr; 103 unsigned long lazy_jiffies; 104 rcu_tasks_gp_func_t gp_func; 105 pregp_func_t pregp_func; 106 pertask_func_t pertask_func; 107 postscan_func_t postscan_func; 108 holdouts_func_t holdouts_func; 109 postgp_func_t postgp_func; 110 call_rcu_func_t call_func; 111 unsigned int wait_state; 112 struct rcu_tasks_percpu __percpu *rtpcpu; 113 int percpu_enqueue_shift; 114 int percpu_enqueue_lim; 115 int percpu_dequeue_lim; 116 unsigned long percpu_dequeue_gpseq; 117 struct mutex barrier_q_mutex; 118 atomic_t barrier_q_count; 119 struct completion barrier_q_completion; 120 unsigned long barrier_q_seq; 121 char *name; 122 char *kname; 123 }; 124 125 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp); 126 127 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ 128 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ 129 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ 130 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \ 131 }; \ 132 static struct rcu_tasks rt_name = \ 133 { \ 134 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \ 135 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ 136 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \ 137 .gp_func = gp, \ 138 .call_func = call, \ 139 .wait_state = TASK_UNINTERRUPTIBLE, \ 140 .rtpcpu = &rt_name ## __percpu, \ 141 .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \ 142 .name = n, \ 143 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \ 144 .percpu_enqueue_lim = 1, \ 145 .percpu_dequeue_lim = 1, \ 146 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \ 147 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \ 148 .kname = #rt_name, \ 149 } 150 151 #ifdef CONFIG_TASKS_RCU 152 153 /* Report delay of scan exiting tasklist in rcu_tasks_postscan(). */ 154 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); 155 static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall); 156 #endif 157 158 /* Avoid IPIing CPUs early in the grace period. */ 159 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) 160 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; 161 module_param(rcu_task_ipi_delay, int, 0644); 162 163 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */ 164 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30) 165 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) 166 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; 167 module_param(rcu_task_stall_timeout, int, 0644); 168 #define RCU_TASK_STALL_INFO (HZ * 10) 169 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO; 170 module_param(rcu_task_stall_info, int, 0644); 171 static int rcu_task_stall_info_mult __read_mostly = 3; 172 module_param(rcu_task_stall_info_mult, int, 0444); 173 174 static int rcu_task_enqueue_lim __read_mostly = -1; 175 module_param(rcu_task_enqueue_lim, int, 0444); 176 177 static bool rcu_task_cb_adjust; 178 static int rcu_task_contend_lim __read_mostly = 100; 179 module_param(rcu_task_contend_lim, int, 0444); 180 static int rcu_task_collapse_lim __read_mostly = 10; 181 module_param(rcu_task_collapse_lim, int, 0444); 182 static int rcu_task_lazy_lim __read_mostly = 32; 183 module_param(rcu_task_lazy_lim, int, 0444); 184 185 /* RCU tasks grace-period state for debugging. */ 186 #define RTGS_INIT 0 187 #define RTGS_WAIT_WAIT_CBS 1 188 #define RTGS_WAIT_GP 2 189 #define RTGS_PRE_WAIT_GP 3 190 #define RTGS_SCAN_TASKLIST 4 191 #define RTGS_POST_SCAN_TASKLIST 5 192 #define RTGS_WAIT_SCAN_HOLDOUTS 6 193 #define RTGS_SCAN_HOLDOUTS 7 194 #define RTGS_POST_GP 8 195 #define RTGS_WAIT_READERS 9 196 #define RTGS_INVOKE_CBS 10 197 #define RTGS_WAIT_CBS 11 198 #ifndef CONFIG_TINY_RCU 199 static const char * const rcu_tasks_gp_state_names[] = { 200 "RTGS_INIT", 201 "RTGS_WAIT_WAIT_CBS", 202 "RTGS_WAIT_GP", 203 "RTGS_PRE_WAIT_GP", 204 "RTGS_SCAN_TASKLIST", 205 "RTGS_POST_SCAN_TASKLIST", 206 "RTGS_WAIT_SCAN_HOLDOUTS", 207 "RTGS_SCAN_HOLDOUTS", 208 "RTGS_POST_GP", 209 "RTGS_WAIT_READERS", 210 "RTGS_INVOKE_CBS", 211 "RTGS_WAIT_CBS", 212 }; 213 #endif /* #ifndef CONFIG_TINY_RCU */ 214 215 //////////////////////////////////////////////////////////////////////// 216 // 217 // Generic code. 218 219 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp); 220 221 /* Record grace-period phase and time. */ 222 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) 223 { 224 rtp->gp_state = newstate; 225 rtp->gp_jiffies = jiffies; 226 } 227 228 #ifndef CONFIG_TINY_RCU 229 /* Return state name. */ 230 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) 231 { 232 int i = data_race(rtp->gp_state); // Let KCSAN detect update races 233 int j = READ_ONCE(i); // Prevent the compiler from reading twice 234 235 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names)) 236 return "???"; 237 return rcu_tasks_gp_state_names[j]; 238 } 239 #endif /* #ifndef CONFIG_TINY_RCU */ 240 241 // Initialize per-CPU callback lists for the specified flavor of 242 // Tasks RCU. Do not enqueue callbacks before this function is invoked. 243 static void cblist_init_generic(struct rcu_tasks *rtp) 244 { 245 int cpu; 246 int lim; 247 int shift; 248 249 if (rcu_task_enqueue_lim < 0) { 250 rcu_task_enqueue_lim = 1; 251 rcu_task_cb_adjust = true; 252 } else if (rcu_task_enqueue_lim == 0) { 253 rcu_task_enqueue_lim = 1; 254 } 255 lim = rcu_task_enqueue_lim; 256 257 if (lim > nr_cpu_ids) 258 lim = nr_cpu_ids; 259 shift = ilog2(nr_cpu_ids / lim); 260 if (((nr_cpu_ids - 1) >> shift) >= lim) 261 shift++; 262 WRITE_ONCE(rtp->percpu_enqueue_shift, shift); 263 WRITE_ONCE(rtp->percpu_dequeue_lim, lim); 264 smp_store_release(&rtp->percpu_enqueue_lim, lim); 265 for_each_possible_cpu(cpu) { 266 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 267 268 WARN_ON_ONCE(!rtpcp); 269 if (cpu) 270 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); 271 if (rcu_segcblist_empty(&rtpcp->cblist)) 272 rcu_segcblist_init(&rtpcp->cblist); 273 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); 274 rtpcp->cpu = cpu; 275 rtpcp->rtpp = rtp; 276 if (!rtpcp->rtp_blkd_tasks.next) 277 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); 278 if (!rtpcp->rtp_exit_list.next) 279 INIT_LIST_HEAD(&rtpcp->rtp_exit_list); 280 } 281 282 pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name, 283 data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust); 284 } 285 286 // Compute wakeup time for lazy callback timer. 287 static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp) 288 { 289 return jiffies + rtp->lazy_jiffies; 290 } 291 292 // Timer handler that unlazifies lazy callbacks. 293 static void call_rcu_tasks_generic_timer(struct timer_list *tlp) 294 { 295 unsigned long flags; 296 bool needwake = false; 297 struct rcu_tasks *rtp; 298 struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer); 299 300 rtp = rtpcp->rtpp; 301 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 302 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) { 303 if (!rtpcp->urgent_gp) 304 rtpcp->urgent_gp = 1; 305 needwake = true; 306 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); 307 } 308 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 309 if (needwake) 310 rcuwait_wake_up(&rtp->cbs_wait); 311 } 312 313 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic(). 314 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp) 315 { 316 struct rcu_tasks *rtp; 317 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work); 318 319 rtp = rtpcp->rtpp; 320 rcuwait_wake_up(&rtp->cbs_wait); 321 } 322 323 // Enqueue a callback for the specified flavor of Tasks RCU. 324 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, 325 struct rcu_tasks *rtp) 326 { 327 int chosen_cpu; 328 unsigned long flags; 329 bool havekthread = smp_load_acquire(&rtp->kthread_ptr); 330 int ideal_cpu; 331 unsigned long j; 332 bool needadjust = false; 333 bool needwake; 334 struct rcu_tasks_percpu *rtpcp; 335 336 rhp->next = NULL; 337 rhp->func = func; 338 local_irq_save(flags); 339 rcu_read_lock(); 340 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); 341 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask); 342 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); 343 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled. 344 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. 345 j = jiffies; 346 if (rtpcp->rtp_jiffies != j) { 347 rtpcp->rtp_jiffies = j; 348 rtpcp->rtp_n_lock_retries = 0; 349 } 350 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim && 351 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) 352 needadjust = true; // Defer adjustment to avoid deadlock. 353 } 354 // Queuing callbacks before initialization not yet supported. 355 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist))) 356 rcu_segcblist_init(&rtpcp->cblist); 357 needwake = (func == wakeme_after_rcu) || 358 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim); 359 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) { 360 if (rtp->lazy_jiffies) 361 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); 362 else 363 needwake = rcu_segcblist_empty(&rtpcp->cblist); 364 } 365 if (needwake) 366 rtpcp->urgent_gp = 3; 367 rcu_segcblist_enqueue(&rtpcp->cblist, rhp); 368 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 369 if (unlikely(needadjust)) { 370 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); 371 if (rtp->percpu_enqueue_lim != nr_cpu_ids) { 372 WRITE_ONCE(rtp->percpu_enqueue_shift, 0); 373 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids); 374 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); 375 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); 376 } 377 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); 378 } 379 rcu_read_unlock(); 380 /* We can't create the thread unless interrupts are enabled. */ 381 if (needwake && READ_ONCE(rtp->kthread_ptr)) 382 irq_work_queue(&rtpcp->rtp_irq_work); 383 } 384 385 // RCU callback function for rcu_barrier_tasks_generic(). 386 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) 387 { 388 struct rcu_tasks *rtp; 389 struct rcu_tasks_percpu *rtpcp; 390 391 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); 392 rtp = rtpcp->rtpp; 393 if (atomic_dec_and_test(&rtp->barrier_q_count)) 394 complete(&rtp->barrier_q_completion); 395 } 396 397 // Wait for all in-flight callbacks for the specified RCU Tasks flavor. 398 // Operates in a manner similar to rcu_barrier(). 399 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp) 400 { 401 int cpu; 402 unsigned long flags; 403 struct rcu_tasks_percpu *rtpcp; 404 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); 405 406 mutex_lock(&rtp->barrier_q_mutex); 407 if (rcu_seq_done(&rtp->barrier_q_seq, s)) { 408 smp_mb(); 409 mutex_unlock(&rtp->barrier_q_mutex); 410 return; 411 } 412 rcu_seq_start(&rtp->barrier_q_seq); 413 init_completion(&rtp->barrier_q_completion); 414 atomic_set(&rtp->barrier_q_count, 2); 415 for_each_possible_cpu(cpu) { 416 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) 417 break; 418 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 419 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; 420 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 421 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head)) 422 atomic_inc(&rtp->barrier_q_count); 423 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 424 } 425 if (atomic_sub_and_test(2, &rtp->barrier_q_count)) 426 complete(&rtp->barrier_q_completion); 427 wait_for_completion(&rtp->barrier_q_completion); 428 rcu_seq_end(&rtp->barrier_q_seq); 429 mutex_unlock(&rtp->barrier_q_mutex); 430 } 431 432 // Advance callbacks and indicate whether either a grace period or 433 // callback invocation is needed. 434 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) 435 { 436 int cpu; 437 int dequeue_limit; 438 unsigned long flags; 439 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq); 440 long n; 441 long ncbs = 0; 442 long ncbsnz = 0; 443 int needgpcb = 0; 444 445 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim); 446 for (cpu = 0; cpu < dequeue_limit; cpu++) { 447 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 448 449 /* Advance and accelerate any new callbacks. */ 450 if (!rcu_segcblist_n_cbs(&rtpcp->cblist)) 451 continue; 452 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 453 // Should we shrink down to a single callback queue? 454 n = rcu_segcblist_n_cbs(&rtpcp->cblist); 455 if (n) { 456 ncbs += n; 457 if (cpu > 0) 458 ncbsnz += n; 459 } 460 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); 461 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); 462 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) { 463 if (rtp->lazy_jiffies) 464 rtpcp->urgent_gp--; 465 needgpcb |= 0x3; 466 } else if (rcu_segcblist_empty(&rtpcp->cblist)) { 467 rtpcp->urgent_gp = 0; 468 } 469 if (rcu_segcblist_ready_cbs(&rtpcp->cblist)) 470 needgpcb |= 0x1; 471 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 472 } 473 474 // Shrink down to a single callback queue if appropriate. 475 // This is done in two stages: (1) If there are no more than 476 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other 477 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period, 478 // if there has not been an increase in callbacks, limit dequeuing 479 // to CPU 0. Note the matching RCU read-side critical section in 480 // call_rcu_tasks_generic(). 481 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) { 482 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); 483 if (rtp->percpu_enqueue_lim > 1) { 484 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids)); 485 smp_store_release(&rtp->percpu_enqueue_lim, 1); 486 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); 487 gpdone = false; 488 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); 489 } 490 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); 491 } 492 if (rcu_task_cb_adjust && !ncbsnz && gpdone) { 493 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); 494 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { 495 WRITE_ONCE(rtp->percpu_dequeue_lim, 1); 496 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); 497 } 498 if (rtp->percpu_dequeue_lim == 1) { 499 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) { 500 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 501 502 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist)); 503 } 504 } 505 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); 506 } 507 508 return needgpcb; 509 } 510 511 // Advance callbacks and invoke any that are ready. 512 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) 513 { 514 int cpu; 515 int cpunext; 516 int cpuwq; 517 unsigned long flags; 518 int len; 519 struct rcu_head *rhp; 520 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 521 struct rcu_tasks_percpu *rtpcp_next; 522 523 cpu = rtpcp->cpu; 524 cpunext = cpu * 2 + 1; 525 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { 526 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); 527 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND; 528 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); 529 cpunext++; 530 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { 531 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); 532 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND; 533 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); 534 } 535 } 536 537 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu)) 538 return; 539 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 540 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); 541 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); 542 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 543 len = rcl.len; 544 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { 545 debug_rcu_head_callback(rhp); 546 local_bh_disable(); 547 rhp->func(rhp); 548 local_bh_enable(); 549 cond_resched(); 550 } 551 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 552 rcu_segcblist_add_len(&rtpcp->cblist, -len); 553 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); 554 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 555 } 556 557 // Workqueue flood to advance callbacks and invoke any that are ready. 558 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp) 559 { 560 struct rcu_tasks *rtp; 561 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work); 562 563 rtp = rtpcp->rtpp; 564 rcu_tasks_invoke_cbs(rtp, rtpcp); 565 } 566 567 // Wait for one grace period. 568 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) 569 { 570 int needgpcb; 571 572 mutex_lock(&rtp->tasks_gp_mutex); 573 574 // If there were none, wait a bit and start over. 575 if (unlikely(midboot)) { 576 needgpcb = 0x2; 577 } else { 578 mutex_unlock(&rtp->tasks_gp_mutex); 579 set_tasks_gp_state(rtp, RTGS_WAIT_CBS); 580 rcuwait_wait_event(&rtp->cbs_wait, 581 (needgpcb = rcu_tasks_need_gpcb(rtp)), 582 TASK_IDLE); 583 mutex_lock(&rtp->tasks_gp_mutex); 584 } 585 586 if (needgpcb & 0x2) { 587 // Wait for one grace period. 588 set_tasks_gp_state(rtp, RTGS_WAIT_GP); 589 rtp->gp_start = jiffies; 590 rcu_seq_start(&rtp->tasks_gp_seq); 591 rtp->gp_func(rtp); 592 rcu_seq_end(&rtp->tasks_gp_seq); 593 } 594 595 // Invoke callbacks. 596 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); 597 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); 598 mutex_unlock(&rtp->tasks_gp_mutex); 599 } 600 601 // RCU-tasks kthread that detects grace periods and invokes callbacks. 602 static int __noreturn rcu_tasks_kthread(void *arg) 603 { 604 int cpu; 605 struct rcu_tasks *rtp = arg; 606 607 for_each_possible_cpu(cpu) { 608 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 609 610 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0); 611 rtpcp->urgent_gp = 1; 612 } 613 614 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ 615 housekeeping_affine(current, HK_TYPE_RCU); 616 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start! 617 618 /* 619 * Each pass through the following loop makes one check for 620 * newly arrived callbacks, and, if there are some, waits for 621 * one RCU-tasks grace period and then invokes the callbacks. 622 * This loop is terminated by the system going down. ;-) 623 */ 624 for (;;) { 625 // Wait for one grace period and invoke any callbacks 626 // that are ready. 627 rcu_tasks_one_gp(rtp, false); 628 629 // Paranoid sleep to keep this from entering a tight loop. 630 schedule_timeout_idle(rtp->gp_sleep); 631 } 632 } 633 634 // Wait for a grace period for the specified flavor of Tasks RCU. 635 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) 636 { 637 /* Complain if the scheduler has not started. */ 638 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, 639 "synchronize_%s() called too soon", rtp->name)) 640 return; 641 642 // If the grace-period kthread is running, use it. 643 if (READ_ONCE(rtp->kthread_ptr)) { 644 wait_rcu_gp_state(rtp->wait_state, rtp->call_func); 645 return; 646 } 647 rcu_tasks_one_gp(rtp, true); 648 } 649 650 /* Spawn RCU-tasks grace-period kthread. */ 651 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) 652 { 653 struct task_struct *t; 654 655 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); 656 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) 657 return; 658 smp_mb(); /* Ensure others see full kthread. */ 659 } 660 661 #ifndef CONFIG_TINY_RCU 662 663 /* 664 * Print any non-default Tasks RCU settings. 665 */ 666 static void __init rcu_tasks_bootup_oddness(void) 667 { 668 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) 669 int rtsimc; 670 671 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT) 672 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout); 673 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10); 674 if (rtsimc != rcu_task_stall_info_mult) { 675 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc); 676 rcu_task_stall_info_mult = rtsimc; 677 } 678 #endif /* #ifdef CONFIG_TASKS_RCU */ 679 #ifdef CONFIG_TASKS_RCU 680 pr_info("\tTrampoline variant of Tasks RCU enabled.\n"); 681 #endif /* #ifdef CONFIG_TASKS_RCU */ 682 #ifdef CONFIG_TASKS_RUDE_RCU 683 pr_info("\tRude variant of Tasks RCU enabled.\n"); 684 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ 685 #ifdef CONFIG_TASKS_TRACE_RCU 686 pr_info("\tTracing variant of Tasks RCU enabled.\n"); 687 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ 688 } 689 690 #endif /* #ifndef CONFIG_TINY_RCU */ 691 692 #ifndef CONFIG_TINY_RCU 693 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ 694 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) 695 { 696 int cpu; 697 bool havecbs = false; 698 bool haveurgent = false; 699 bool haveurgentcbs = false; 700 701 for_each_possible_cpu(cpu) { 702 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); 703 704 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) 705 havecbs = true; 706 if (data_race(rtpcp->urgent_gp)) 707 haveurgent = true; 708 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp)) 709 haveurgentcbs = true; 710 if (havecbs && haveurgent && haveurgentcbs) 711 break; 712 } 713 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n", 714 rtp->kname, 715 tasks_gp_state_getname(rtp), data_race(rtp->gp_state), 716 jiffies - data_race(rtp->gp_jiffies), 717 data_race(rcu_seq_current(&rtp->tasks_gp_seq)), 718 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), 719 ".k"[!!data_race(rtp->kthread_ptr)], 720 ".C"[havecbs], 721 ".u"[haveurgent], 722 ".U"[haveurgentcbs], 723 rtp->lazy_jiffies, 724 s); 725 } 726 #endif // #ifndef CONFIG_TINY_RCU 727 728 static void exit_tasks_rcu_finish_trace(struct task_struct *t); 729 730 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) 731 732 //////////////////////////////////////////////////////////////////////// 733 // 734 // Shared code between task-list-scanning variants of Tasks RCU. 735 736 /* Wait for one RCU-tasks grace period. */ 737 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) 738 { 739 struct task_struct *g; 740 int fract; 741 LIST_HEAD(holdouts); 742 unsigned long j; 743 unsigned long lastinfo; 744 unsigned long lastreport; 745 bool reported = false; 746 int rtsi; 747 struct task_struct *t; 748 749 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); 750 rtp->pregp_func(&holdouts); 751 752 /* 753 * There were callbacks, so we need to wait for an RCU-tasks 754 * grace period. Start off by scanning the task list for tasks 755 * that are not already voluntarily blocked. Mark these tasks 756 * and make a list of them in holdouts. 757 */ 758 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); 759 if (rtp->pertask_func) { 760 rcu_read_lock(); 761 for_each_process_thread(g, t) 762 rtp->pertask_func(t, &holdouts); 763 rcu_read_unlock(); 764 } 765 766 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); 767 rtp->postscan_func(&holdouts); 768 769 /* 770 * Each pass through the following loop scans the list of holdout 771 * tasks, removing any that are no longer holdouts. When the list 772 * is empty, we are done. 773 */ 774 lastreport = jiffies; 775 lastinfo = lastreport; 776 rtsi = READ_ONCE(rcu_task_stall_info); 777 778 // Start off with initial wait and slowly back off to 1 HZ wait. 779 fract = rtp->init_fract; 780 781 while (!list_empty(&holdouts)) { 782 ktime_t exp; 783 bool firstreport; 784 bool needreport; 785 int rtst; 786 787 // Slowly back off waiting for holdouts 788 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); 789 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { 790 schedule_timeout_idle(fract); 791 } else { 792 exp = jiffies_to_nsecs(fract); 793 __set_current_state(TASK_IDLE); 794 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD); 795 } 796 797 if (fract < HZ) 798 fract++; 799 800 rtst = READ_ONCE(rcu_task_stall_timeout); 801 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); 802 if (needreport) { 803 lastreport = jiffies; 804 reported = true; 805 } 806 firstreport = true; 807 WARN_ON(signal_pending(current)); 808 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); 809 rtp->holdouts_func(&holdouts, needreport, &firstreport); 810 811 // Print pre-stall informational messages if needed. 812 j = jiffies; 813 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) { 814 lastinfo = j; 815 rtsi = rtsi * rcu_task_stall_info_mult; 816 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n", 817 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); 818 } 819 } 820 821 set_tasks_gp_state(rtp, RTGS_POST_GP); 822 rtp->postgp_func(rtp); 823 } 824 825 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ 826 827 #ifdef CONFIG_TASKS_RCU 828 829 //////////////////////////////////////////////////////////////////////// 830 // 831 // Simple variant of RCU whose quiescent states are voluntary context 832 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle. 833 // As such, grace periods can take one good long time. There are no 834 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() 835 // because this implementation is intended to get the system into a safe 836 // state for some of the manipulations involved in tracing and the like. 837 // Finally, this implementation does not support high call_rcu_tasks() 838 // rates from multiple CPUs. If this is required, per-CPU callback lists 839 // will be needed. 840 // 841 // The implementation uses rcu_tasks_wait_gp(), which relies on function 842 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread() 843 // function sets these function pointers up so that rcu_tasks_wait_gp() 844 // invokes these functions in this order: 845 // 846 // rcu_tasks_pregp_step(): 847 // Invokes synchronize_rcu() in order to wait for all in-flight 848 // t->on_rq and t->nvcsw transitions to complete. This works because 849 // all such transitions are carried out with interrupts disabled. 850 // rcu_tasks_pertask(), invoked on every non-idle task: 851 // For every runnable non-idle task other than the current one, use 852 // get_task_struct() to pin down that task, snapshot that task's 853 // number of voluntary context switches, and add that task to the 854 // holdout list. 855 // rcu_tasks_postscan(): 856 // Gather per-CPU lists of tasks in do_exit() to ensure that all 857 // tasks that were in the process of exiting (and which thus might 858 // not know to synchronize with this RCU Tasks grace period) have 859 // completed exiting. The synchronize_rcu() in rcu_tasks_postgp() 860 // will take care of any tasks stuck in the non-preemptible region 861 // of do_exit() following its call to exit_tasks_rcu_stop(). 862 // check_all_holdout_tasks(), repeatedly until holdout list is empty: 863 // Scans the holdout list, attempting to identify a quiescent state 864 // for each task on the list. If there is a quiescent state, the 865 // corresponding task is removed from the holdout list. 866 // rcu_tasks_postgp(): 867 // Invokes synchronize_rcu() in order to ensure that all prior 868 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks 869 // to have happened before the end of this RCU Tasks grace period. 870 // Again, this works because all such transitions are carried out 871 // with interrupts disabled. 872 // 873 // For each exiting task, the exit_tasks_rcu_start() and 874 // exit_tasks_rcu_finish() functions add and remove, respectively, the 875 // current task to a per-CPU list of tasks that rcu_tasks_postscan() must 876 // wait on. This is necessary because rcu_tasks_postscan() must wait on 877 // tasks that have already been removed from the global list of tasks. 878 // 879 // Pre-grace-period update-side code is ordered before the grace 880 // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code 881 // is ordered before the grace period via synchronize_rcu() call in 882 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt 883 // disabling. 884 885 /* Pre-grace-period preparation. */ 886 static void rcu_tasks_pregp_step(struct list_head *hop) 887 { 888 /* 889 * Wait for all pre-existing t->on_rq and t->nvcsw transitions 890 * to complete. Invoking synchronize_rcu() suffices because all 891 * these transitions occur with interrupts disabled. Without this 892 * synchronize_rcu(), a read-side critical section that started 893 * before the grace period might be incorrectly seen as having 894 * started after the grace period. 895 * 896 * This synchronize_rcu() also dispenses with the need for a 897 * memory barrier on the first store to t->rcu_tasks_holdout, 898 * as it forces the store to happen after the beginning of the 899 * grace period. 900 */ 901 synchronize_rcu(); 902 } 903 904 /* Check for quiescent states since the pregp's synchronize_rcu() */ 905 static bool rcu_tasks_is_holdout(struct task_struct *t) 906 { 907 int cpu; 908 909 /* Has the task been seen voluntarily sleeping? */ 910 if (!READ_ONCE(t->on_rq)) 911 return false; 912 913 /* 914 * Idle tasks (or idle injection) within the idle loop are RCU-tasks 915 * quiescent states. But CPU boot code performed by the idle task 916 * isn't a quiescent state. 917 */ 918 if (is_idle_task(t)) 919 return false; 920 921 cpu = task_cpu(t); 922 923 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */ 924 if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) 925 return false; 926 927 return true; 928 } 929 930 /* Per-task initial processing. */ 931 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) 932 { 933 if (t != current && rcu_tasks_is_holdout(t)) { 934 get_task_struct(t); 935 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); 936 WRITE_ONCE(t->rcu_tasks_holdout, true); 937 list_add(&t->rcu_tasks_holdout_list, hop); 938 } 939 } 940 941 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); 942 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); 943 944 /* Processing between scanning taskslist and draining the holdout list. */ 945 static void rcu_tasks_postscan(struct list_head *hop) 946 { 947 int cpu; 948 int rtsi = READ_ONCE(rcu_task_stall_info); 949 950 if (!IS_ENABLED(CONFIG_TINY_RCU)) { 951 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi; 952 add_timer(&tasks_rcu_exit_srcu_stall_timer); 953 } 954 955 /* 956 * Exiting tasks may escape the tasklist scan. Those are vulnerable 957 * until their final schedule() with TASK_DEAD state. To cope with 958 * this, divide the fragile exit path part in two intersecting 959 * read side critical sections: 960 * 961 * 1) A task_struct list addition before calling exit_notify(), 962 * which may remove the task from the tasklist, with the 963 * removal after the final preempt_disable() call in do_exit(). 964 * 965 * 2) An _RCU_ read side starting with the final preempt_disable() 966 * call in do_exit() and ending with the final call to schedule() 967 * with TASK_DEAD state. 968 * 969 * This handles the part 1). And postgp will handle part 2) with a 970 * call to synchronize_rcu(). 971 */ 972 973 for_each_possible_cpu(cpu) { 974 unsigned long j = jiffies + 1; 975 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu); 976 struct task_struct *t; 977 struct task_struct *t1; 978 struct list_head tmp; 979 980 raw_spin_lock_irq_rcu_node(rtpcp); 981 list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) { 982 if (list_empty(&t->rcu_tasks_holdout_list)) 983 rcu_tasks_pertask(t, hop); 984 985 // RT kernels need frequent pauses, otherwise 986 // pause at least once per pair of jiffies. 987 if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j)) 988 continue; 989 990 // Keep our place in the list while pausing. 991 // Nothing else traverses this list, so adding a 992 // bare list_head is OK. 993 list_add(&tmp, &t->rcu_tasks_exit_list); 994 raw_spin_unlock_irq_rcu_node(rtpcp); 995 cond_resched(); // For CONFIG_PREEMPT=n kernels 996 raw_spin_lock_irq_rcu_node(rtpcp); 997 t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list); 998 list_del(&tmp); 999 j = jiffies + 1; 1000 } 1001 raw_spin_unlock_irq_rcu_node(rtpcp); 1002 } 1003 1004 if (!IS_ENABLED(CONFIG_TINY_RCU)) 1005 del_timer_sync(&tasks_rcu_exit_srcu_stall_timer); 1006 } 1007 1008 /* See if tasks are still holding out, complain if so. */ 1009 static void check_holdout_task(struct task_struct *t, 1010 bool needreport, bool *firstreport) 1011 { 1012 int cpu; 1013 1014 if (!READ_ONCE(t->rcu_tasks_holdout) || 1015 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || 1016 !rcu_tasks_is_holdout(t) || 1017 (IS_ENABLED(CONFIG_NO_HZ_FULL) && 1018 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) { 1019 WRITE_ONCE(t->rcu_tasks_holdout, false); 1020 list_del_init(&t->rcu_tasks_holdout_list); 1021 put_task_struct(t); 1022 return; 1023 } 1024 rcu_request_urgent_qs_task(t); 1025 if (!needreport) 1026 return; 1027 if (*firstreport) { 1028 pr_err("INFO: rcu_tasks detected stalls on tasks:\n"); 1029 *firstreport = false; 1030 } 1031 cpu = task_cpu(t); 1032 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n", 1033 t, ".I"[is_idle_task(t)], 1034 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)], 1035 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, 1036 data_race(t->rcu_tasks_idle_cpu), cpu); 1037 sched_show_task(t); 1038 } 1039 1040 /* Scan the holdout lists for tasks no longer holding out. */ 1041 static void check_all_holdout_tasks(struct list_head *hop, 1042 bool needreport, bool *firstreport) 1043 { 1044 struct task_struct *t, *t1; 1045 1046 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { 1047 check_holdout_task(t, needreport, firstreport); 1048 cond_resched(); 1049 } 1050 } 1051 1052 /* Finish off the Tasks-RCU grace period. */ 1053 static void rcu_tasks_postgp(struct rcu_tasks *rtp) 1054 { 1055 /* 1056 * Because ->on_rq and ->nvcsw are not guaranteed to have a full 1057 * memory barriers prior to them in the schedule() path, memory 1058 * reordering on other CPUs could cause their RCU-tasks read-side 1059 * critical sections to extend past the end of the grace period. 1060 * However, because these ->nvcsw updates are carried out with 1061 * interrupts disabled, we can use synchronize_rcu() to force the 1062 * needed ordering on all such CPUs. 1063 * 1064 * This synchronize_rcu() also confines all ->rcu_tasks_holdout 1065 * accesses to be within the grace period, avoiding the need for 1066 * memory barriers for ->rcu_tasks_holdout accesses. 1067 * 1068 * In addition, this synchronize_rcu() waits for exiting tasks 1069 * to complete their final preempt_disable() region of execution, 1070 * enforcing the whole region before tasklist removal until 1071 * the final schedule() with TASK_DEAD state to be an RCU TASKS 1072 * read side critical section. 1073 */ 1074 synchronize_rcu(); 1075 } 1076 1077 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused) 1078 { 1079 #ifndef CONFIG_TINY_RCU 1080 int rtsi; 1081 1082 rtsi = READ_ONCE(rcu_task_stall_info); 1083 pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n", 1084 __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq, 1085 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies); 1086 pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n"); 1087 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi; 1088 add_timer(&tasks_rcu_exit_srcu_stall_timer); 1089 #endif // #ifndef CONFIG_TINY_RCU 1090 } 1091 1092 /** 1093 * call_rcu_tasks() - Queue an RCU for invocation task-based grace period 1094 * @rhp: structure to be used for queueing the RCU updates. 1095 * @func: actual callback function to be invoked after the grace period 1096 * 1097 * The callback function will be invoked some time after a full grace 1098 * period elapses, in other words after all currently executing RCU 1099 * read-side critical sections have completed. call_rcu_tasks() assumes 1100 * that the read-side critical sections end at a voluntary context 1101 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, 1102 * or transition to usermode execution. As such, there are no read-side 1103 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because 1104 * this primitive is intended to determine that all tasks have passed 1105 * through a safe state, not so much for data-structure synchronization. 1106 * 1107 * See the description of call_rcu() for more detailed information on 1108 * memory ordering guarantees. 1109 */ 1110 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) 1111 { 1112 call_rcu_tasks_generic(rhp, func, &rcu_tasks); 1113 } 1114 EXPORT_SYMBOL_GPL(call_rcu_tasks); 1115 1116 /** 1117 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. 1118 * 1119 * Control will return to the caller some time after a full rcu-tasks 1120 * grace period has elapsed, in other words after all currently 1121 * executing rcu-tasks read-side critical sections have elapsed. These 1122 * read-side critical sections are delimited by calls to schedule(), 1123 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls 1124 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). 1125 * 1126 * This is a very specialized primitive, intended only for a few uses in 1127 * tracing and other situations requiring manipulation of function 1128 * preambles and profiling hooks. The synchronize_rcu_tasks() function 1129 * is not (yet) intended for heavy use from multiple CPUs. 1130 * 1131 * See the description of synchronize_rcu() for more detailed information 1132 * on memory ordering guarantees. 1133 */ 1134 void synchronize_rcu_tasks(void) 1135 { 1136 synchronize_rcu_tasks_generic(&rcu_tasks); 1137 } 1138 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); 1139 1140 /** 1141 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. 1142 * 1143 * Although the current implementation is guaranteed to wait, it is not 1144 * obligated to, for example, if there are no pending callbacks. 1145 */ 1146 void rcu_barrier_tasks(void) 1147 { 1148 rcu_barrier_tasks_generic(&rcu_tasks); 1149 } 1150 EXPORT_SYMBOL_GPL(rcu_barrier_tasks); 1151 1152 static int rcu_tasks_lazy_ms = -1; 1153 module_param(rcu_tasks_lazy_ms, int, 0444); 1154 1155 static int __init rcu_spawn_tasks_kthread(void) 1156 { 1157 rcu_tasks.gp_sleep = HZ / 10; 1158 rcu_tasks.init_fract = HZ / 10; 1159 if (rcu_tasks_lazy_ms >= 0) 1160 rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms); 1161 rcu_tasks.pregp_func = rcu_tasks_pregp_step; 1162 rcu_tasks.pertask_func = rcu_tasks_pertask; 1163 rcu_tasks.postscan_func = rcu_tasks_postscan; 1164 rcu_tasks.holdouts_func = check_all_holdout_tasks; 1165 rcu_tasks.postgp_func = rcu_tasks_postgp; 1166 rcu_tasks.wait_state = TASK_IDLE; 1167 rcu_spawn_tasks_kthread_generic(&rcu_tasks); 1168 return 0; 1169 } 1170 1171 #if !defined(CONFIG_TINY_RCU) 1172 void show_rcu_tasks_classic_gp_kthread(void) 1173 { 1174 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); 1175 } 1176 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); 1177 #endif // !defined(CONFIG_TINY_RCU) 1178 1179 struct task_struct *get_rcu_tasks_gp_kthread(void) 1180 { 1181 return rcu_tasks.kthread_ptr; 1182 } 1183 EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread); 1184 1185 void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq) 1186 { 1187 *flags = 0; 1188 *gp_seq = rcu_seq_current(&rcu_tasks.tasks_gp_seq); 1189 } 1190 EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data); 1191 1192 /* 1193 * Protect against tasklist scan blind spot while the task is exiting and 1194 * may be removed from the tasklist. Do this by adding the task to yet 1195 * another list. 1196 * 1197 * Note that the task will remove itself from this list, so there is no 1198 * need for get_task_struct(), except in the case where rcu_tasks_pertask() 1199 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies 1200 * the needed get_task_struct(). 1201 */ 1202 void exit_tasks_rcu_start(void) 1203 { 1204 unsigned long flags; 1205 struct rcu_tasks_percpu *rtpcp; 1206 struct task_struct *t = current; 1207 1208 WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list)); 1209 preempt_disable(); 1210 rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu); 1211 t->rcu_tasks_exit_cpu = smp_processor_id(); 1212 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1213 WARN_ON_ONCE(!rtpcp->rtp_exit_list.next); 1214 list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list); 1215 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1216 preempt_enable(); 1217 } 1218 1219 /* 1220 * Remove the task from the "yet another list" because do_exit() is now 1221 * non-preemptible, allowing synchronize_rcu() to wait beyond this point. 1222 */ 1223 void exit_tasks_rcu_stop(void) 1224 { 1225 unsigned long flags; 1226 struct rcu_tasks_percpu *rtpcp; 1227 struct task_struct *t = current; 1228 1229 WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list)); 1230 rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu); 1231 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1232 list_del_init(&t->rcu_tasks_exit_list); 1233 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1234 } 1235 1236 /* 1237 * Contribute to protect against tasklist scan blind spot while the 1238 * task is exiting and may be removed from the tasklist. See 1239 * corresponding synchronize_srcu() for further details. 1240 */ 1241 void exit_tasks_rcu_finish(void) 1242 { 1243 exit_tasks_rcu_stop(); 1244 exit_tasks_rcu_finish_trace(current); 1245 } 1246 1247 #else /* #ifdef CONFIG_TASKS_RCU */ 1248 void exit_tasks_rcu_start(void) { } 1249 void exit_tasks_rcu_stop(void) { } 1250 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } 1251 #endif /* #else #ifdef CONFIG_TASKS_RCU */ 1252 1253 #ifdef CONFIG_TASKS_RUDE_RCU 1254 1255 //////////////////////////////////////////////////////////////////////// 1256 // 1257 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of 1258 // passing an empty function to schedule_on_each_cpu(). This approach 1259 // provides an asynchronous call_rcu_tasks_rude() API and batching of 1260 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API. 1261 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide 1262 // and induces otherwise unnecessary context switches on all online CPUs, 1263 // whether idle or not. 1264 // 1265 // Callback handling is provided by the rcu_tasks_kthread() function. 1266 // 1267 // Ordering is provided by the scheduler's context-switch code. 1268 1269 // Empty function to allow workqueues to force a context switch. 1270 static void rcu_tasks_be_rude(struct work_struct *work) 1271 { 1272 } 1273 1274 // Wait for one rude RCU-tasks grace period. 1275 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) 1276 { 1277 rtp->n_ipis += cpumask_weight(cpu_online_mask); 1278 schedule_on_each_cpu(rcu_tasks_be_rude); 1279 } 1280 1281 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); 1282 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, 1283 "RCU Tasks Rude"); 1284 1285 /** 1286 * call_rcu_tasks_rude() - Queue a callback rude task-based grace period 1287 * @rhp: structure to be used for queueing the RCU updates. 1288 * @func: actual callback function to be invoked after the grace period 1289 * 1290 * The callback function will be invoked some time after a full grace 1291 * period elapses, in other words after all currently executing RCU 1292 * read-side critical sections have completed. call_rcu_tasks_rude() 1293 * assumes that the read-side critical sections end at context switch, 1294 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as 1295 * usermode execution is schedulable). As such, there are no read-side 1296 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because 1297 * this primitive is intended to determine that all tasks have passed 1298 * through a safe state, not so much for data-structure synchronization. 1299 * 1300 * See the description of call_rcu() for more detailed information on 1301 * memory ordering guarantees. 1302 */ 1303 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) 1304 { 1305 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude); 1306 } 1307 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); 1308 1309 /** 1310 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period 1311 * 1312 * Control will return to the caller some time after a rude rcu-tasks 1313 * grace period has elapsed, in other words after all currently 1314 * executing rcu-tasks read-side critical sections have elapsed. These 1315 * read-side critical sections are delimited by calls to schedule(), 1316 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable 1317 * context), and (in theory, anyway) cond_resched(). 1318 * 1319 * This is a very specialized primitive, intended only for a few uses in 1320 * tracing and other situations requiring manipulation of function preambles 1321 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not 1322 * (yet) intended for heavy use from multiple CPUs. 1323 * 1324 * See the description of synchronize_rcu() for more detailed information 1325 * on memory ordering guarantees. 1326 */ 1327 void synchronize_rcu_tasks_rude(void) 1328 { 1329 synchronize_rcu_tasks_generic(&rcu_tasks_rude); 1330 } 1331 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); 1332 1333 /** 1334 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. 1335 * 1336 * Although the current implementation is guaranteed to wait, it is not 1337 * obligated to, for example, if there are no pending callbacks. 1338 */ 1339 void rcu_barrier_tasks_rude(void) 1340 { 1341 rcu_barrier_tasks_generic(&rcu_tasks_rude); 1342 } 1343 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); 1344 1345 int rcu_tasks_rude_lazy_ms = -1; 1346 module_param(rcu_tasks_rude_lazy_ms, int, 0444); 1347 1348 static int __init rcu_spawn_tasks_rude_kthread(void) 1349 { 1350 rcu_tasks_rude.gp_sleep = HZ / 10; 1351 if (rcu_tasks_rude_lazy_ms >= 0) 1352 rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms); 1353 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); 1354 return 0; 1355 } 1356 1357 #if !defined(CONFIG_TINY_RCU) 1358 void show_rcu_tasks_rude_gp_kthread(void) 1359 { 1360 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, ""); 1361 } 1362 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); 1363 #endif // !defined(CONFIG_TINY_RCU) 1364 1365 struct task_struct *get_rcu_tasks_rude_gp_kthread(void) 1366 { 1367 return rcu_tasks_rude.kthread_ptr; 1368 } 1369 EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread); 1370 1371 void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq) 1372 { 1373 *flags = 0; 1374 *gp_seq = rcu_seq_current(&rcu_tasks_rude.tasks_gp_seq); 1375 } 1376 EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data); 1377 1378 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ 1379 1380 //////////////////////////////////////////////////////////////////////// 1381 // 1382 // Tracing variant of Tasks RCU. This variant is designed to be used 1383 // to protect tracing hooks, including those of BPF. This variant 1384 // therefore: 1385 // 1386 // 1. Has explicit read-side markers to allow finite grace periods 1387 // in the face of in-kernel loops for PREEMPT=n builds. 1388 // 1389 // 2. Protects code in the idle loop, exception entry/exit, and 1390 // CPU-hotplug code paths, similar to the capabilities of SRCU. 1391 // 1392 // 3. Avoids expensive read-side instructions, having overhead similar 1393 // to that of Preemptible RCU. 1394 // 1395 // There are of course downsides. For example, the grace-period code 1396 // can send IPIs to CPUs, even when those CPUs are in the idle loop or 1397 // in nohz_full userspace. If needed, these downsides can be at least 1398 // partially remedied. 1399 // 1400 // Perhaps most important, this variant of RCU does not affect the vanilla 1401 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace 1402 // readers can operate from idle, offline, and exception entry/exit in no 1403 // way allows rcu_preempt and rcu_sched readers to also do so. 1404 // 1405 // The implementation uses rcu_tasks_wait_gp(), which relies on function 1406 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() 1407 // function sets these function pointers up so that rcu_tasks_wait_gp() 1408 // invokes these functions in this order: 1409 // 1410 // rcu_tasks_trace_pregp_step(): 1411 // Disables CPU hotplug, adds all currently executing tasks to the 1412 // holdout list, then checks the state of all tasks that blocked 1413 // or were preempted within their current RCU Tasks Trace read-side 1414 // critical section, adding them to the holdout list if appropriate. 1415 // Finally, this function re-enables CPU hotplug. 1416 // The ->pertask_func() pointer is NULL, so there is no per-task processing. 1417 // rcu_tasks_trace_postscan(): 1418 // Invokes synchronize_rcu() to wait for late-stage exiting tasks 1419 // to finish exiting. 1420 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: 1421 // Scans the holdout list, attempting to identify a quiescent state 1422 // for each task on the list. If there is a quiescent state, the 1423 // corresponding task is removed from the holdout list. Once this 1424 // list is empty, the grace period has completed. 1425 // rcu_tasks_trace_postgp(): 1426 // Provides the needed full memory barrier and does debug checks. 1427 // 1428 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. 1429 // 1430 // Pre-grace-period update-side code is ordered before the grace period 1431 // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period 1432 // read-side code is ordered before the grace period by atomic operations 1433 // on .b.need_qs flag of each task involved in this process, or by scheduler 1434 // context-switch ordering (for locked-down non-running readers). 1435 1436 // The lockdep state must be outside of #ifdef to be useful. 1437 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1438 static struct lock_class_key rcu_lock_trace_key; 1439 struct lockdep_map rcu_trace_lock_map = 1440 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); 1441 EXPORT_SYMBOL_GPL(rcu_trace_lock_map); 1442 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 1443 1444 #ifdef CONFIG_TASKS_TRACE_RCU 1445 1446 // Record outstanding IPIs to each CPU. No point in sending two... 1447 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); 1448 1449 // The number of detections of task quiescent state relying on 1450 // heavyweight readers executing explicit memory barriers. 1451 static unsigned long n_heavy_reader_attempts; 1452 static unsigned long n_heavy_reader_updates; 1453 static unsigned long n_heavy_reader_ofl_updates; 1454 static unsigned long n_trc_holdouts; 1455 1456 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); 1457 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, 1458 "RCU Tasks Trace"); 1459 1460 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */ 1461 static u8 rcu_ld_need_qs(struct task_struct *t) 1462 { 1463 smp_mb(); // Enforce full grace-period ordering. 1464 return smp_load_acquire(&t->trc_reader_special.b.need_qs); 1465 } 1466 1467 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */ 1468 static void rcu_st_need_qs(struct task_struct *t, u8 v) 1469 { 1470 smp_store_release(&t->trc_reader_special.b.need_qs, v); 1471 smp_mb(); // Enforce full grace-period ordering. 1472 } 1473 1474 /* 1475 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for 1476 * the four-byte operand-size restriction of some platforms. 1477 * 1478 * Returns the old value, which is often ignored. 1479 */ 1480 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) 1481 { 1482 union rcu_special ret; 1483 union rcu_special trs_old = READ_ONCE(t->trc_reader_special); 1484 union rcu_special trs_new = trs_old; 1485 1486 if (trs_old.b.need_qs != old) 1487 return trs_old.b.need_qs; 1488 trs_new.b.need_qs = new; 1489 1490 // Although cmpxchg() appears to KCSAN to update all four bytes, 1491 // only the .b.need_qs byte actually changes. 1492 instrument_atomic_read_write(&t->trc_reader_special.b.need_qs, 1493 sizeof(t->trc_reader_special.b.need_qs)); 1494 // Avoid false-positive KCSAN failures. 1495 ret.s = data_race(cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s)); 1496 1497 return ret.b.need_qs; 1498 } 1499 EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); 1500 1501 /* 1502 * If we are the last reader, signal the grace-period kthread. 1503 * Also remove from the per-CPU list of blocked tasks. 1504 */ 1505 void rcu_read_unlock_trace_special(struct task_struct *t) 1506 { 1507 unsigned long flags; 1508 struct rcu_tasks_percpu *rtpcp; 1509 union rcu_special trs; 1510 1511 // Open-coded full-word version of rcu_ld_need_qs(). 1512 smp_mb(); // Enforce full grace-period ordering. 1513 trs = smp_load_acquire(&t->trc_reader_special); 1514 1515 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) 1516 smp_mb(); // Pairs with update-side barriers. 1517 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. 1518 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) { 1519 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, 1520 TRC_NEED_QS_CHECKED); 1521 1522 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result); 1523 } 1524 if (trs.b.blocked) { 1525 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); 1526 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1527 list_del_init(&t->trc_blkd_node); 1528 WRITE_ONCE(t->trc_reader_special.b.blocked, false); 1529 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1530 } 1531 WRITE_ONCE(t->trc_reader_nesting, 0); 1532 } 1533 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); 1534 1535 /* Add a newly blocked reader task to its CPU's list. */ 1536 void rcu_tasks_trace_qs_blkd(struct task_struct *t) 1537 { 1538 unsigned long flags; 1539 struct rcu_tasks_percpu *rtpcp; 1540 1541 local_irq_save(flags); 1542 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu); 1543 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled 1544 t->trc_blkd_cpu = smp_processor_id(); 1545 if (!rtpcp->rtp_blkd_tasks.next) 1546 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); 1547 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); 1548 WRITE_ONCE(t->trc_reader_special.b.blocked, true); 1549 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1550 } 1551 EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); 1552 1553 /* Add a task to the holdout list, if it is not already on the list. */ 1554 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) 1555 { 1556 if (list_empty(&t->trc_holdout_list)) { 1557 get_task_struct(t); 1558 list_add(&t->trc_holdout_list, bhp); 1559 n_trc_holdouts++; 1560 } 1561 } 1562 1563 /* Remove a task from the holdout list, if it is in fact present. */ 1564 static void trc_del_holdout(struct task_struct *t) 1565 { 1566 if (!list_empty(&t->trc_holdout_list)) { 1567 list_del_init(&t->trc_holdout_list); 1568 put_task_struct(t); 1569 n_trc_holdouts--; 1570 } 1571 } 1572 1573 /* IPI handler to check task state. */ 1574 static void trc_read_check_handler(void *t_in) 1575 { 1576 int nesting; 1577 struct task_struct *t = current; 1578 struct task_struct *texp = t_in; 1579 1580 // If the task is no longer running on this CPU, leave. 1581 if (unlikely(texp != t)) 1582 goto reset_ipi; // Already on holdout list, so will check later. 1583 1584 // If the task is not in a read-side critical section, and 1585 // if this is the last reader, awaken the grace-period kthread. 1586 nesting = READ_ONCE(t->trc_reader_nesting); 1587 if (likely(!nesting)) { 1588 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); 1589 goto reset_ipi; 1590 } 1591 // If we are racing with an rcu_read_unlock_trace(), try again later. 1592 if (unlikely(nesting < 0)) 1593 goto reset_ipi; 1594 1595 // Get here if the task is in a read-side critical section. 1596 // Set its state so that it will update state for the grace-period 1597 // kthread upon exit from that critical section. 1598 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); 1599 1600 reset_ipi: 1601 // Allow future IPIs to be sent on CPU and for task. 1602 // Also order this IPI handler against any later manipulations of 1603 // the intended task. 1604 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ 1605 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ 1606 } 1607 1608 /* Callback function for scheduler to check locked-down task. */ 1609 static int trc_inspect_reader(struct task_struct *t, void *bhp_in) 1610 { 1611 struct list_head *bhp = bhp_in; 1612 int cpu = task_cpu(t); 1613 int nesting; 1614 bool ofl = cpu_is_offline(cpu); 1615 1616 if (task_curr(t) && !ofl) { 1617 // If no chance of heavyweight readers, do it the hard way. 1618 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) 1619 return -EINVAL; 1620 1621 // If heavyweight readers are enabled on the remote task, 1622 // we can inspect its state despite its currently running. 1623 // However, we cannot safely change its state. 1624 n_heavy_reader_attempts++; 1625 // Check for "running" idle tasks on offline CPUs. 1626 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) 1627 return -EINVAL; // No quiescent state, do it the hard way. 1628 n_heavy_reader_updates++; 1629 nesting = 0; 1630 } else { 1631 // The task is not running, so C-language access is safe. 1632 nesting = t->trc_reader_nesting; 1633 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); 1634 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) 1635 n_heavy_reader_ofl_updates++; 1636 } 1637 1638 // If not exiting a read-side critical section, mark as checked 1639 // so that the grace-period kthread will remove it from the 1640 // holdout list. 1641 if (!nesting) { 1642 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); 1643 return 0; // In QS, so done. 1644 } 1645 if (nesting < 0) 1646 return -EINVAL; // Reader transitioning, try again later. 1647 1648 // The task is in a read-side critical section, so set up its 1649 // state so that it will update state upon exit from that critical 1650 // section. 1651 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) 1652 trc_add_holdout(t, bhp); 1653 return 0; 1654 } 1655 1656 /* Attempt to extract the state for the specified task. */ 1657 static void trc_wait_for_one_reader(struct task_struct *t, 1658 struct list_head *bhp) 1659 { 1660 int cpu; 1661 1662 // If a previous IPI is still in flight, let it complete. 1663 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI 1664 return; 1665 1666 // The current task had better be in a quiescent state. 1667 if (t == current) { 1668 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); 1669 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); 1670 return; 1671 } 1672 1673 // Attempt to nail down the task for inspection. 1674 get_task_struct(t); 1675 if (!task_call_func(t, trc_inspect_reader, bhp)) { 1676 put_task_struct(t); 1677 return; 1678 } 1679 put_task_struct(t); 1680 1681 // If this task is not yet on the holdout list, then we are in 1682 // an RCU read-side critical section. Otherwise, the invocation of 1683 // trc_add_holdout() that added it to the list did the necessary 1684 // get_task_struct(). Either way, the task cannot be freed out 1685 // from under this code. 1686 1687 // If currently running, send an IPI, either way, add to list. 1688 trc_add_holdout(t, bhp); 1689 if (task_curr(t) && 1690 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { 1691 // The task is currently running, so try IPIing it. 1692 cpu = task_cpu(t); 1693 1694 // If there is already an IPI outstanding, let it happen. 1695 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) 1696 return; 1697 1698 per_cpu(trc_ipi_to_cpu, cpu) = true; 1699 t->trc_ipi_to_cpu = cpu; 1700 rcu_tasks_trace.n_ipis++; 1701 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { 1702 // Just in case there is some other reason for 1703 // failure than the target CPU being offline. 1704 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", 1705 __func__, cpu); 1706 rcu_tasks_trace.n_ipis_fails++; 1707 per_cpu(trc_ipi_to_cpu, cpu) = false; 1708 t->trc_ipi_to_cpu = -1; 1709 } 1710 } 1711 } 1712 1713 /* 1714 * Initialize for first-round processing for the specified task. 1715 * Return false if task is NULL or already taken care of, true otherwise. 1716 */ 1717 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) 1718 { 1719 // During early boot when there is only the one boot CPU, there 1720 // is no idle task for the other CPUs. Also, the grace-period 1721 // kthread is always in a quiescent state. In addition, just return 1722 // if this task is already on the list. 1723 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) 1724 return false; 1725 1726 rcu_st_need_qs(t, 0); 1727 t->trc_ipi_to_cpu = -1; 1728 return true; 1729 } 1730 1731 /* Do first-round processing for the specified task. */ 1732 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) 1733 { 1734 if (rcu_tasks_trace_pertask_prep(t, true)) 1735 trc_wait_for_one_reader(t, hop); 1736 } 1737 1738 /* Initialize for a new RCU-tasks-trace grace period. */ 1739 static void rcu_tasks_trace_pregp_step(struct list_head *hop) 1740 { 1741 LIST_HEAD(blkd_tasks); 1742 int cpu; 1743 unsigned long flags; 1744 struct rcu_tasks_percpu *rtpcp; 1745 struct task_struct *t; 1746 1747 // There shouldn't be any old IPIs, but... 1748 for_each_possible_cpu(cpu) 1749 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); 1750 1751 // Disable CPU hotplug across the CPU scan for the benefit of 1752 // any IPIs that might be needed. This also waits for all readers 1753 // in CPU-hotplug code paths. 1754 cpus_read_lock(); 1755 1756 // These rcu_tasks_trace_pertask_prep() calls are serialized to 1757 // allow safe access to the hop list. 1758 for_each_online_cpu(cpu) { 1759 rcu_read_lock(); 1760 t = cpu_curr_snapshot(cpu); 1761 if (rcu_tasks_trace_pertask_prep(t, true)) 1762 trc_add_holdout(t, hop); 1763 rcu_read_unlock(); 1764 cond_resched_tasks_rcu_qs(); 1765 } 1766 1767 // Only after all running tasks have been accounted for is it 1768 // safe to take care of the tasks that have blocked within their 1769 // current RCU tasks trace read-side critical section. 1770 for_each_possible_cpu(cpu) { 1771 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); 1772 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1773 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks); 1774 while (!list_empty(&blkd_tasks)) { 1775 rcu_read_lock(); 1776 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); 1777 list_del_init(&t->trc_blkd_node); 1778 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); 1779 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1780 rcu_tasks_trace_pertask(t, hop); 1781 rcu_read_unlock(); 1782 raw_spin_lock_irqsave_rcu_node(rtpcp, flags); 1783 } 1784 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); 1785 cond_resched_tasks_rcu_qs(); 1786 } 1787 1788 // Re-enable CPU hotplug now that the holdout list is populated. 1789 cpus_read_unlock(); 1790 } 1791 1792 /* 1793 * Do intermediate processing between task and holdout scans. 1794 */ 1795 static void rcu_tasks_trace_postscan(struct list_head *hop) 1796 { 1797 // Wait for late-stage exiting tasks to finish exiting. 1798 // These might have passed the call to exit_tasks_rcu_finish(). 1799 1800 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! 1801 synchronize_rcu(); 1802 // Any tasks that exit after this point will set 1803 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. 1804 } 1805 1806 /* Communicate task state back to the RCU tasks trace stall warning request. */ 1807 struct trc_stall_chk_rdr { 1808 int nesting; 1809 int ipi_to_cpu; 1810 u8 needqs; 1811 }; 1812 1813 static int trc_check_slow_task(struct task_struct *t, void *arg) 1814 { 1815 struct trc_stall_chk_rdr *trc_rdrp = arg; 1816 1817 if (task_curr(t) && cpu_online(task_cpu(t))) 1818 return false; // It is running, so decline to inspect it. 1819 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); 1820 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); 1821 trc_rdrp->needqs = rcu_ld_need_qs(t); 1822 return true; 1823 } 1824 1825 /* Show the state of a task stalling the current RCU tasks trace GP. */ 1826 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) 1827 { 1828 int cpu; 1829 struct trc_stall_chk_rdr trc_rdr; 1830 bool is_idle_tsk = is_idle_task(t); 1831 1832 if (*firstreport) { 1833 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); 1834 *firstreport = false; 1835 } 1836 cpu = task_cpu(t); 1837 if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) 1838 pr_alert("P%d: %c%c\n", 1839 t->pid, 1840 ".I"[t->trc_ipi_to_cpu >= 0], 1841 ".i"[is_idle_tsk]); 1842 else 1843 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n", 1844 t->pid, 1845 ".I"[trc_rdr.ipi_to_cpu >= 0], 1846 ".i"[is_idle_tsk], 1847 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], 1848 ".B"[!!data_race(t->trc_reader_special.b.blocked)], 1849 trc_rdr.nesting, 1850 " !CN"[trc_rdr.needqs & 0x3], 1851 " ?"[trc_rdr.needqs > 0x3], 1852 cpu, cpu_online(cpu) ? "" : "(offline)"); 1853 sched_show_task(t); 1854 } 1855 1856 /* List stalled IPIs for RCU tasks trace. */ 1857 static void show_stalled_ipi_trace(void) 1858 { 1859 int cpu; 1860 1861 for_each_possible_cpu(cpu) 1862 if (per_cpu(trc_ipi_to_cpu, cpu)) 1863 pr_alert("\tIPI outstanding to CPU %d\n", cpu); 1864 } 1865 1866 /* Do one scan of the holdout list. */ 1867 static void check_all_holdout_tasks_trace(struct list_head *hop, 1868 bool needreport, bool *firstreport) 1869 { 1870 struct task_struct *g, *t; 1871 1872 // Disable CPU hotplug across the holdout list scan for IPIs. 1873 cpus_read_lock(); 1874 1875 list_for_each_entry_safe(t, g, hop, trc_holdout_list) { 1876 // If safe and needed, try to check the current task. 1877 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && 1878 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) 1879 trc_wait_for_one_reader(t, hop); 1880 1881 // If check succeeded, remove this task from the list. 1882 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && 1883 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) 1884 trc_del_holdout(t); 1885 else if (needreport) 1886 show_stalled_task_trace(t, firstreport); 1887 cond_resched_tasks_rcu_qs(); 1888 } 1889 1890 // Re-enable CPU hotplug now that the holdout list scan has completed. 1891 cpus_read_unlock(); 1892 1893 if (needreport) { 1894 if (*firstreport) 1895 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); 1896 show_stalled_ipi_trace(); 1897 } 1898 } 1899 1900 static void rcu_tasks_trace_empty_fn(void *unused) 1901 { 1902 } 1903 1904 /* Wait for grace period to complete and provide ordering. */ 1905 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) 1906 { 1907 int cpu; 1908 1909 // Wait for any lingering IPI handlers to complete. Note that 1910 // if a CPU has gone offline or transitioned to userspace in the 1911 // meantime, all IPI handlers should have been drained beforehand. 1912 // Yes, this assumes that CPUs process IPIs in order. If that ever 1913 // changes, there will need to be a recheck and/or timed wait. 1914 for_each_online_cpu(cpu) 1915 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) 1916 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); 1917 1918 smp_mb(); // Caller's code must be ordered after wakeup. 1919 // Pairs with pretty much every ordering primitive. 1920 } 1921 1922 /* Report any needed quiescent state for this exiting task. */ 1923 static void exit_tasks_rcu_finish_trace(struct task_struct *t) 1924 { 1925 union rcu_special trs = READ_ONCE(t->trc_reader_special); 1926 1927 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); 1928 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); 1929 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) 1930 rcu_read_unlock_trace_special(t); 1931 else 1932 WRITE_ONCE(t->trc_reader_nesting, 0); 1933 } 1934 1935 /** 1936 * call_rcu_tasks_trace() - Queue a callback trace task-based grace period 1937 * @rhp: structure to be used for queueing the RCU updates. 1938 * @func: actual callback function to be invoked after the grace period 1939 * 1940 * The callback function will be invoked some time after a trace rcu-tasks 1941 * grace period elapses, in other words after all currently executing 1942 * trace rcu-tasks read-side critical sections have completed. These 1943 * read-side critical sections are delimited by calls to rcu_read_lock_trace() 1944 * and rcu_read_unlock_trace(). 1945 * 1946 * See the description of call_rcu() for more detailed information on 1947 * memory ordering guarantees. 1948 */ 1949 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) 1950 { 1951 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace); 1952 } 1953 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); 1954 1955 /** 1956 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period 1957 * 1958 * Control will return to the caller some time after a trace rcu-tasks 1959 * grace period has elapsed, in other words after all currently executing 1960 * trace rcu-tasks read-side critical sections have elapsed. These read-side 1961 * critical sections are delimited by calls to rcu_read_lock_trace() 1962 * and rcu_read_unlock_trace(). 1963 * 1964 * This is a very specialized primitive, intended only for a few uses in 1965 * tracing and other situations requiring manipulation of function preambles 1966 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not 1967 * (yet) intended for heavy use from multiple CPUs. 1968 * 1969 * See the description of synchronize_rcu() for more detailed information 1970 * on memory ordering guarantees. 1971 */ 1972 void synchronize_rcu_tasks_trace(void) 1973 { 1974 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); 1975 synchronize_rcu_tasks_generic(&rcu_tasks_trace); 1976 } 1977 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); 1978 1979 /** 1980 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. 1981 * 1982 * Although the current implementation is guaranteed to wait, it is not 1983 * obligated to, for example, if there are no pending callbacks. 1984 */ 1985 void rcu_barrier_tasks_trace(void) 1986 { 1987 rcu_barrier_tasks_generic(&rcu_tasks_trace); 1988 } 1989 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); 1990 1991 int rcu_tasks_trace_lazy_ms = -1; 1992 module_param(rcu_tasks_trace_lazy_ms, int, 0444); 1993 1994 static int __init rcu_spawn_tasks_trace_kthread(void) 1995 { 1996 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { 1997 rcu_tasks_trace.gp_sleep = HZ / 10; 1998 rcu_tasks_trace.init_fract = HZ / 10; 1999 } else { 2000 rcu_tasks_trace.gp_sleep = HZ / 200; 2001 if (rcu_tasks_trace.gp_sleep <= 0) 2002 rcu_tasks_trace.gp_sleep = 1; 2003 rcu_tasks_trace.init_fract = HZ / 200; 2004 if (rcu_tasks_trace.init_fract <= 0) 2005 rcu_tasks_trace.init_fract = 1; 2006 } 2007 if (rcu_tasks_trace_lazy_ms >= 0) 2008 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms); 2009 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; 2010 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; 2011 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; 2012 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; 2013 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); 2014 return 0; 2015 } 2016 2017 #if !defined(CONFIG_TINY_RCU) 2018 void show_rcu_tasks_trace_gp_kthread(void) 2019 { 2020 char buf[64]; 2021 2022 snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu", 2023 data_race(n_trc_holdouts), 2024 data_race(n_heavy_reader_ofl_updates), 2025 data_race(n_heavy_reader_updates), 2026 data_race(n_heavy_reader_attempts)); 2027 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); 2028 } 2029 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); 2030 #endif // !defined(CONFIG_TINY_RCU) 2031 2032 struct task_struct *get_rcu_tasks_trace_gp_kthread(void) 2033 { 2034 return rcu_tasks_trace.kthread_ptr; 2035 } 2036 EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); 2037 2038 void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq) 2039 { 2040 *flags = 0; 2041 *gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq); 2042 } 2043 EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data); 2044 2045 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ 2046 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } 2047 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ 2048 2049 #ifndef CONFIG_TINY_RCU 2050 void show_rcu_tasks_gp_kthreads(void) 2051 { 2052 show_rcu_tasks_classic_gp_kthread(); 2053 show_rcu_tasks_rude_gp_kthread(); 2054 show_rcu_tasks_trace_gp_kthread(); 2055 } 2056 #endif /* #ifndef CONFIG_TINY_RCU */ 2057 2058 #ifdef CONFIG_PROVE_RCU 2059 struct rcu_tasks_test_desc { 2060 struct rcu_head rh; 2061 const char *name; 2062 bool notrun; 2063 unsigned long runstart; 2064 }; 2065 2066 static struct rcu_tasks_test_desc tests[] = { 2067 { 2068 .name = "call_rcu_tasks()", 2069 /* If not defined, the test is skipped. */ 2070 .notrun = IS_ENABLED(CONFIG_TASKS_RCU), 2071 }, 2072 { 2073 .name = "call_rcu_tasks_rude()", 2074 /* If not defined, the test is skipped. */ 2075 .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU), 2076 }, 2077 { 2078 .name = "call_rcu_tasks_trace()", 2079 /* If not defined, the test is skipped. */ 2080 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU) 2081 } 2082 }; 2083 2084 static void test_rcu_tasks_callback(struct rcu_head *rhp) 2085 { 2086 struct rcu_tasks_test_desc *rttd = 2087 container_of(rhp, struct rcu_tasks_test_desc, rh); 2088 2089 pr_info("Callback from %s invoked.\n", rttd->name); 2090 2091 rttd->notrun = false; 2092 } 2093 2094 static void rcu_tasks_initiate_self_tests(void) 2095 { 2096 #ifdef CONFIG_TASKS_RCU 2097 pr_info("Running RCU Tasks wait API self tests\n"); 2098 tests[0].runstart = jiffies; 2099 synchronize_rcu_tasks(); 2100 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); 2101 #endif 2102 2103 #ifdef CONFIG_TASKS_RUDE_RCU 2104 pr_info("Running RCU Tasks Rude wait API self tests\n"); 2105 tests[1].runstart = jiffies; 2106 synchronize_rcu_tasks_rude(); 2107 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); 2108 #endif 2109 2110 #ifdef CONFIG_TASKS_TRACE_RCU 2111 pr_info("Running RCU Tasks Trace wait API self tests\n"); 2112 tests[2].runstart = jiffies; 2113 synchronize_rcu_tasks_trace(); 2114 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); 2115 #endif 2116 } 2117 2118 /* 2119 * Return: 0 - test passed 2120 * 1 - test failed, but have not timed out yet 2121 * -1 - test failed and timed out 2122 */ 2123 static int rcu_tasks_verify_self_tests(void) 2124 { 2125 int ret = 0; 2126 int i; 2127 unsigned long bst = rcu_task_stall_timeout; 2128 2129 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT) 2130 bst = RCU_TASK_BOOT_STALL_TIMEOUT; 2131 for (i = 0; i < ARRAY_SIZE(tests); i++) { 2132 while (tests[i].notrun) { // still hanging. 2133 if (time_after(jiffies, tests[i].runstart + bst)) { 2134 pr_err("%s has failed boot-time tests.\n", tests[i].name); 2135 ret = -1; 2136 break; 2137 } 2138 ret = 1; 2139 break; 2140 } 2141 } 2142 WARN_ON(ret < 0); 2143 2144 return ret; 2145 } 2146 2147 /* 2148 * Repeat the rcu_tasks_verify_self_tests() call once every second until the 2149 * test passes or has timed out. 2150 */ 2151 static struct delayed_work rcu_tasks_verify_work; 2152 static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused) 2153 { 2154 int ret = rcu_tasks_verify_self_tests(); 2155 2156 if (ret <= 0) 2157 return; 2158 2159 /* Test fails but not timed out yet, reschedule another check */ 2160 schedule_delayed_work(&rcu_tasks_verify_work, HZ); 2161 } 2162 2163 static int rcu_tasks_verify_schedule_work(void) 2164 { 2165 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn); 2166 rcu_tasks_verify_work_fn(NULL); 2167 return 0; 2168 } 2169 late_initcall(rcu_tasks_verify_schedule_work); 2170 #else /* #ifdef CONFIG_PROVE_RCU */ 2171 static void rcu_tasks_initiate_self_tests(void) { } 2172 #endif /* #else #ifdef CONFIG_PROVE_RCU */ 2173 2174 void __init tasks_cblist_init_generic(void) 2175 { 2176 lockdep_assert_irqs_disabled(); 2177 WARN_ON(num_online_cpus() > 1); 2178 2179 #ifdef CONFIG_TASKS_RCU 2180 cblist_init_generic(&rcu_tasks); 2181 #endif 2182 2183 #ifdef CONFIG_TASKS_RUDE_RCU 2184 cblist_init_generic(&rcu_tasks_rude); 2185 #endif 2186 2187 #ifdef CONFIG_TASKS_TRACE_RCU 2188 cblist_init_generic(&rcu_tasks_trace); 2189 #endif 2190 } 2191 2192 void __init rcu_init_tasks_generic(void) 2193 { 2194 #ifdef CONFIG_TASKS_RCU 2195 rcu_spawn_tasks_kthread(); 2196 #endif 2197 2198 #ifdef CONFIG_TASKS_RUDE_RCU 2199 rcu_spawn_tasks_rude_kthread(); 2200 #endif 2201 2202 #ifdef CONFIG_TASKS_TRACE_RCU 2203 rcu_spawn_tasks_trace_kthread(); 2204 #endif 2205 2206 // Run the self-tests. 2207 rcu_tasks_initiate_self_tests(); 2208 } 2209 2210 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ 2211 static inline void rcu_tasks_bootup_oddness(void) {} 2212 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ 2213