/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @lazy_timer: Timer to unlazify callbacks.
 * @urgent_gp: Number of additional non-lazy grace periods.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
 * @cpu: CPU number corresponding to this entry.
 * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct timer_list lazy_timer;
	unsigned int urgent_gp;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	struct list_head rtp_blkd_tasks;
	struct list_head rtp_exit_list;
	int cpu;
	int index;
	struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot in upper bits.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @barrier_q_start: Most recent barrier start in jiffies.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcuwait cbs_wait;
	raw_spinlock_t cbs_gbl_lock;
	struct mutex tasks_gp_mutex;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	unsigned long lazy_jiffies;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	unsigned int wait_state;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	struct rcu_tasks_percpu **rtpcp_array;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	unsigned long barrier_q_start;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)					\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {	\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),	\
	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),		\
};										\
static struct rcu_tasks rt_name =						\
{										\
	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),			\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),		\
	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),		\
	.gp_func = gp,								\
	.call_func = call,							\
	.wait_state = TASK_UNINTERRUPTIBLE,					\
	.rtpcpu = &rt_name ## __percpu,						\
	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),					\
	.name = n,								\
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),			\
	.percpu_enqueue_lim = 1,						\
	.percpu_dequeue_lim = 1,						\
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),	\
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,			\
	.kname = #rt_name,							\
}

#ifdef CONFIG_TASKS_RCU

/* Report delay of scan exiting tasklist in rcu_tasks_postscan(). */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
#endif

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
static int rcu_task_lazy_lim __read_mostly = 32;
module_param(rcu_task_lazy_lim, int, 0444);

static int rcu_task_cpu_ids;

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.  Do not enqueue callbacks before this function is invoked.
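// For example, given eight contiguously numbered possible CPUs and an
// enqueue limit of two, the computed shift is 2, so CPUs 0-3 enqueue
// onto CPU 0's queue and CPUs 4-7 enqueue onto CPU 1's queue.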
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	int lim;
	int shift;
	int maxcpu;
	int index = 0;

	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
	BUG_ON(!rtp->rtpcp_array);

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		rtpcp->index = index;
		rtp->rtpcp_array[index] = rtpcp;
		index++;
		if (!rtpcp->rtp_blkd_tasks.next)
			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
		if (!rtpcp->rtp_exit_list.next)
			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
		rtpcp->barrier_q_head.next = &rtpcp->barrier_q_head;
		maxcpu = cpu;
	}

	rcu_task_cpu_ids = maxcpu + 1;
	if (lim > rcu_task_cpu_ids)
		lim = rcu_task_cpu_ids;
	shift = ilog2(rcu_task_cpu_ids / lim);
	if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
		shift++;
	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);

	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
		rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
		rcu_task_cb_adjust, rcu_task_cpu_ids);
}

// Compute wakeup time for lazy callback timer.
static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
{
	return jiffies + rtp->lazy_jiffies;
}

// Timer handler that unlazifies lazy callbacks.
static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
{
	unsigned long flags;
	bool needwake = false;
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);

	rtp = rtpcp->rtpp;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
		if (!rtpcp->urgent_gp)
			rtpcp->urgent_gp = 1;
		needwake = true;
		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
	}
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (needwake)
		rcuwait_wake_up(&rtp->cbs_wait);
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	rcuwait_wake_up(&rtp->cbs_wait);
}

// Enqueue a callback for the specified flavor of Tasks RCU.
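// When laziness is enabled (non-zero ->lazy_jiffies), newly queued
// callbacks normally wait for either the per-CPU lazy timer to expire or
// for the queue to reach rcu_task_lazy_lim callbacks before the
// grace-period kthread is awakened.  Callbacks queued on behalf of
// synchronous waiters (wakeme_after_rcu) always provoke an immediate wakeup.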
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	int chosen_cpu;
	unsigned long flags;
	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
	int ideal_cpu;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
	WARN_ON_ONCE(chosen_cpu >= rcu_task_cpu_ids);
	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	// Queuing callbacks before initialization not yet supported.
	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
		rcu_segcblist_init(&rtpcp->cblist);
	needwake = (func == wakeme_after_rcu) ||
		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
		if (rtp->lazy_jiffies)
			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
		else
			needwake = rcu_segcblist_empty(&rtpcp->cblist);
	}
	if (needwake)
		rtpcp->urgent_gp = 3;
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
			WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = rhp; // Mark the callback as having been invoked.
	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
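// If another barrier operation has both started and completed since the
// ->barrier_q_seq snapshot was taken, all callbacks queued before this
// function was invoked have already been waited on, so this reduces to a
// mutex acquisition plus a full memory barrier.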
static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		smp_mb();
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rtp->barrier_q_start = jiffies;
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	int dequeue_limit;
	unsigned long flags;
	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
	for (cpu = 0; cpu < dequeue_limit; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
			if (rtp->lazy_jiffies)
				rtpcp->urgent_gp--;
			needgpcb |= 0x3;
		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
			rtpcp->urgent_gp = 0;
		}
		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
			needgpcb |= 0x1;
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeuing
	// to CPU 0.  Note the matching RCU read-side critical section in
	// call_rcu_tasks_generic().
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			gpdone = false;
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		if (rtp->percpu_dequeue_lim == 1) {
			for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
				if (!cpu_possible(cpu))
					continue;
				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
			}
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}

// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpuwq;
	unsigned long flags;
	int len;
	int index;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	index = rtpcp->index * 2 + 1;
	if (index < num_possible_cpus()) {
		rtpcp_next = rtp->rtpcp_array[index];
		if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
			index++;
			if (index < num_possible_cpus()) {
				rtpcp_next = rtp->rtpcp_array[index];
				if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
					cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
					queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
				}
			}
		}
	}

	if (rcu_segcblist_empty(&rtpcp->cblist))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		debug_rcu_head_callback(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}

// Wait for one grace period.
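// The needgpcb value computed by rcu_tasks_need_gpcb() is a bitmask:
// bit 0x1 indicates that callbacks are ready to be invoked, and bit 0x2
// indicates that a new grace period is needed for not-yet-ready callbacks.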
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{
	int needgpcb;

	mutex_lock(&rtp->tasks_gp_mutex);

	// In the mid-boot case, force a grace period.  Otherwise, wait
	// for callbacks in need of a grace period or invocation.
	if (unlikely(midboot)) {
		needgpcb = 0x2;
	} else {
		mutex_unlock(&rtp->tasks_gp_mutex);
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
		rcuwait_wait_event(&rtp->cbs_wait,
				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
				   TASK_IDLE);
		mutex_lock(&rtp->tasks_gp_mutex);
	}

	if (needgpcb & 0x2) {
		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rcu_seq_start(&rtp->tasks_gp_seq);
		rtp->gp_func(rtp);
		rcu_seq_end(&rtp->tasks_gp_seq);
	}

	// Invoke callbacks.
	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
	mutex_unlock(&rtp->tasks_gp_mutex);
}

// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int cpu;
	struct rcu_tasks *rtp = arg;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
		rtpcp->urgent_gp = 1;
	}

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_TYPE_RCU);
	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		// Wait for one grace period and invoke any callbacks
		// that are ready.
		rcu_tasks_one_gp(rtp, false);

		// Paranoid sleep to keep this from entering a tight loop.
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
		      "synchronize_%s() called too soon", rtp->name))
		return;

	// If the grace-period kthread is running, use it.
	if (READ_ONCE(rtp->kthread_ptr)) {
		wait_rcu_gp_state(rtp->wait_state, rtp->call_func);
		return;
	}
	rcu_tasks_one_gp(rtp, true);
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	int rtsimc;

	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
	if (rtsimc != rcu_task_stall_info_mult) {
		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
		rcu_task_stall_info_mult = rtsimc;
	}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	int cpu;
	bool havecbs = false;
	bool haveurgent = false;
	bool haveurgentcbs = false;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
			havecbs = true;
		if (data_race(rtpcp->urgent_gp))
			haveurgent = true;
		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
			haveurgentcbs = true;
		if (havecbs && haveurgent && haveurgentcbs)
			break;
	}
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[havecbs],
		".u"[haveurgent],
		".U"[haveurgentcbs],
		rtp->lazy_jiffies,
		s);
}

/* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */
static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt,
						   char *tf, char *tst)
{
	cpumask_var_t cm;
	int cpu;
	bool gotcb = false;
	unsigned long j = jiffies;

	pr_alert("%s%s Tasks%s RCU g%ld gp_start %lu gp_jiffies %lu gp_state %d (%s).\n",
		 tt, tf, tst, data_race(rtp->tasks_gp_seq),
		 j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),
		 data_race(rtp->gp_state), tasks_gp_state_getname(rtp));
	pr_alert("\tEnqueue shift %d limit %d Dequeue limit %d gpseq %lu.\n",
		 data_race(rtp->percpu_enqueue_shift),
		 data_race(rtp->percpu_enqueue_lim),
		 data_race(rtp->percpu_dequeue_lim),
		 data_race(rtp->percpu_dequeue_gpseq));
	(void)zalloc_cpumask_var(&cm, GFP_KERNEL);
	pr_alert("\tCallback counts:");
	for_each_possible_cpu(cpu) {
		long n;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (cpumask_available(cm) && !rcu_barrier_cb_is_done(&rtpcp->barrier_q_head))
			cpumask_set_cpu(cpu, cm);
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (!n)
			continue;
		pr_cont(" %d:%ld", cpu, n);
		gotcb = true;
	}
	if (gotcb)
		pr_cont(".\n");
	else
		pr_cont(" (none).\n");
	pr_alert("\tBarrier seq %lu start %lu count %d holdout CPUs ",
		 data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start),
		 atomic_read(&rtp->barrier_q_count));
	if (cpumask_available(cm) && !cpumask_empty(cm))
		pr_cont(" %*pbl.\n", cpumask_pr_args(cm));
	else
		pr_cont("(none).\n");
	free_cpumask_var(cm);
}

#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g;
	int fract;
	LIST_HEAD(holdouts);
	unsigned long j;
	unsigned long lastinfo;
	unsigned long lastreport;
	bool reported = false;
	int rtsi;
	struct task_struct *t;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func(&holdouts);

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	if (rtp->pertask_func) {
		rcu_read_lock();
		for_each_process_thread(g, t)
			rtp->pertask_func(t, &holdouts);
		rcu_read_unlock();
	}

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;
	lastinfo = lastreport;
	rtsi = READ_ONCE(rcu_task_stall_info);

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		ktime_t exp;
		bool firstreport;
		bool needreport;
		int rtst;

		// Slowly back off waiting for holdouts
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			schedule_timeout_idle(fract);
		} else {
			exp = jiffies_to_nsecs(fract);
			__set_current_state(TASK_IDLE);
			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
		}

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport) {
			lastreport = jiffies;
			reported = true;
		}
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);

		// Print pre-stall informational messages if needed.
		j = jiffies;
		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
			lastinfo = j;
			rtsi = rtsi * rcu_task_stall_info_mult;
			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
		}
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Gather per-CPU lists of tasks in do_exit() to ensure that all
//	tasks that were in the process of exiting (and which thus might
//	not know to synchronize with this RCU Tasks grace period) have
//	completed exiting.  The synchronize_rcu() in rcu_tasks_postgp()
//	will take care of any tasks stuck in the non-preemptible region
//	of do_exit() following its call to exit_tasks_rcu_finish().
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions add and remove, respectively, the
// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
// wait on.  This is necessary because rcu_tasks_postscan() must wait on
// tasks that have already been removed from the global list of tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via the synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Check for quiescent states since the pregp's synchronize_rcu() */
static bool rcu_tasks_is_holdout(struct task_struct *t)
{
	int cpu;

	/* Has the task been seen voluntarily sleeping? */
	if (!READ_ONCE(t->on_rq))
		return false;

	/*
	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
	 * quiescent states.  But CPU boot code performed by the idle task
	 * isn't a quiescent state.
	 */
	if (is_idle_task(t))
		return false;

	cpu = task_cpu(t);

	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
		return false;

	return true;
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && rcu_tasks_is_holdout(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	int cpu;
	int rtsi = READ_ONCE(rcu_task_stall_info);

	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
		add_timer(&tasks_rcu_exit_srcu_stall_timer);
	}

	/*
	 * Exiting tasks may escape the tasklist scan.  Those are vulnerable
	 * until their final schedule() with TASK_DEAD state.  To cope with
	 * this, divide the fragile part of the exit path into two
	 * intersecting read-side critical sections:
	 *
	 * 1) A task_struct list addition before calling exit_notify(),
	 *    which may remove the task from the tasklist, with the
	 *    removal after the final preempt_disable() call in do_exit().
	 *
	 * 2) An _RCU_ read side starting with the final preempt_disable()
	 *    call in do_exit() and ending with the final call to schedule()
	 *    with TASK_DEAD state.
	 *
	 * This function handles part 1).  The postgp function will handle
	 * part 2) with a call to synchronize_rcu().
	 */

	for_each_possible_cpu(cpu) {
		unsigned long j = jiffies + 1;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
		struct task_struct *t;
		struct task_struct *t1;
		struct list_head tmp;

		raw_spin_lock_irq_rcu_node(rtpcp);
		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
			if (list_empty(&t->rcu_tasks_holdout_list))
				rcu_tasks_pertask(t, hop);

			// RT kernels need frequent pauses, otherwise
			// pause at least once per pair of jiffies.
			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
				continue;

			// Keep our place in the list while pausing.
			// Nothing else traverses this list, so adding a
			// bare list_head is OK.
			list_add(&tmp, &t->rcu_tasks_exit_list);
			raw_spin_unlock_irq_rcu_node(rtpcp);
			cond_resched(); // For CONFIG_PREEMPT=n kernels
			raw_spin_lock_irq_rcu_node(rtpcp);
			t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
			list_del(&tmp);
			j = jiffies + 1;
		}
		raw_spin_unlock_irq_rcu_node(rtpcp);
	}

	if (!IS_ENABLED(CONFIG_TINY_RCU))
		del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !rcu_tasks_is_holdout(t) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 data_race(t->rcu_tasks_idle_cpu), cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * enforcing the whole region before tasklist removal until
	 * the final schedule() with TASK_DEAD state to be an RCU TASKS
	 * read side critical section.
	 */
	synchronize_rcu();
}

static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{
#ifndef CONFIG_TINY_RCU
	int rtsi;

	rtsi = READ_ONCE(rcu_task_stall_info);
	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
	pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
	add_timer(&tasks_rcu_exit_srcu_stall_timer);
#endif // #ifndef CONFIG_TINY_RCU
}

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int rcu_tasks_lazy_ms = -1;
module_param(rcu_tasks_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	if (rcu_tasks_lazy_ms >= 0)
		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_tasks.wait_state = TASK_IDLE;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);

void rcu_tasks_torture_stats_print(char *tt, char *tf)
{
	rcu_tasks_torture_stats_print_generic(&rcu_tasks, tt, tf, "");
}
EXPORT_SYMBOL_GPL(rcu_tasks_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_gp_kthread(void)
{
	return rcu_tasks.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);

void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = rcu_seq_current(&rcu_tasks.tasks_gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data);

/*
 * Protect against tasklist scan blind spot while the task is exiting and
 * may be removed from the tasklist.  Do this by adding the task to yet
 * another list.
 *
 * Note that the task will remove itself from this list, so there is no
 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
 * the needed get_task_struct().
 */
void exit_tasks_rcu_start(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
	preempt_disable();
	rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
	t->rcu_tasks_exit_cpu = smp_processor_id();
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	WARN_ON_ONCE(!rtpcp->rtp_exit_list.next);
	list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	preempt_enable();
}

/*
 * Remove the task from the "yet another list" because do_exit() is now
 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
 */
void exit_tasks_rcu_finish(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
	rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	list_del_init(&t->rcu_tasks_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);

	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's
// trick of passing an empty function to schedule_on_each_cpu().
// This approach provides batching of concurrent calls to the synchronous
// synchronize_rcu_tasks_rude() API.  This invokes schedule_on_each_cpu()
// in order to send IPIs far and wide and induces otherwise unnecessary
// context switches on all online CPUs, whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/*
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 *
 * This is no longer exported, and is instead reserved for use by
 * synchronize_rcu_tasks_rude().
 */
static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);

void rcu_tasks_rude_torture_stats_print(char *tt, char *tf)
{
	rcu_tasks_torture_stats_print_generic(&rcu_tasks_rude, tt, tf, "");
}
EXPORT_SYMBOL_GPL(rcu_tasks_rude_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
{
	return rcu_tasks_rude.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);

void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = rcu_seq_current(&rcu_tasks_rude.tasks_gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data);

#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  For example, the grace-period code
// can send IPIs to CPUs, even when those CPUs are in the idle loop or
// in nohz_full userspace.  If needed, these downsides can be at least
// partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Disables CPU hotplug, adds all currently executing tasks to the
//	holdout list, then checks the state of all tasks that blocked
//	or were preempted within their current RCU Tasks Trace read-side
//	critical section, adding them to the holdout list if appropriate.
//	Finally, this function re-enables CPU hotplug.
// The ->pertask_func() pointer is NULL, so there is no per-task processing.
// rcu_tasks_trace_postscan():
//	Invokes synchronize_rcu() to wait for late-stage exiting tasks
//	to finish exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.  Once this
//	list is empty, the grace period has completed.
// rcu_tasks_trace_postgp():
//	Provides the needed full memory barrier and does debug checks.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
// read-side code is ordered before the grace period by atomic operations
// on .b.need_qs flag of each task involved in this process, or by scheduler
// context-switch ordering (for locked-down non-running readers).

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;
static unsigned long n_trc_holdouts;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/* Load from ->trc_reader_special.b.need_qs with proper ordering. */

/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
static u8 rcu_ld_need_qs(struct task_struct *t)
{
	smp_mb(); // Enforce full grace-period ordering.
	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
}

/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
static void rcu_st_need_qs(struct task_struct *t, u8 v)
{
	smp_store_release(&t->trc_reader_special.b.need_qs, v);
	smp_mb(); // Enforce full grace-period ordering.
}

/*
 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
 * the four-byte operand-size restriction of some platforms.
 *
 * Returns the old value, which is often ignored.
 */
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
{
	union rcu_special ret;
	union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
	union rcu_special trs_new = trs_old;

	if (trs_old.b.need_qs != old)
		return trs_old.b.need_qs;
	trs_new.b.need_qs = new;

	// Although cmpxchg() appears to KCSAN to update all four bytes,
	// only the .b.need_qs byte actually changes.
	instrument_atomic_read_write(&t->trc_reader_special.b.need_qs,
				     sizeof(t->trc_reader_special.b.need_qs));
	// Avoid false-positive KCSAN failures.
	ret.s = data_race(cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s));

	return ret.b.need_qs;
}
EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);

/*
 * If we are the last reader, signal the grace-period kthread.
 * Also remove from the per-CPU list of blocked tasks.
 */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	union rcu_special trs;

	// Open-coded full-word version of rcu_ld_need_qs().
	smp_mb(); // Enforce full grace-period ordering.
	trs = smp_load_acquire(&t->trc_reader_special);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
						    TRC_NEED_QS_CHECKED);

		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
	}
	if (trs.b.blocked) {
		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_del_init(&t->trc_blkd_node);
		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	WRITE_ONCE(t->trc_reader_nesting, 0);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
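
// The byte-in-word technique used by rcu_trc_cmpxchg_need_qs() above
// generalizes to any sub-word field: read the containing word, modify
// only the field of interest in a local copy, then cmpxchg() the whole
// word. A loop-until-success variant would look roughly like this
// (illustrative sketch only; "desired" is a stand-in value):
//
//	union rcu_special old = READ_ONCE(t->trc_reader_special);
//	union rcu_special new;
//
//	do {
//		new = old;
//		new.b.need_qs = desired;
//	} while (!try_cmpxchg(&t->trc_reader_special.s, &old.s, new.s));
//
// The function above instead makes a single attempt and returns the old
// value, which is all that its callers require.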

/* Add a newly blocked reader task to its CPU's list. */
void rcu_tasks_trace_qs_blkd(struct task_struct *t)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;

	local_irq_save(flags);
	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
	t->trc_blkd_cpu = smp_processor_id();
	if (!rtpcp->rtp_blkd_tasks.next)
		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
		n_trc_holdouts++;
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
		n_trc_holdouts--;
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	int nesting;
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t))
		goto reset_ipi; // Already on holdout list, so will check later.

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	nesting = READ_ONCE(t->trc_reader_nesting);
	if (likely(!nesting)) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(nesting < 0))
		goto reset_ipi;

	// Get here if the task is in a read-side critical section.
	// Set its state so that it will update state for the grace-period
	// kthread upon exit from that critical section.
	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
{
	struct list_head *bhp = bhp_in;
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t) && !ofl) {
		// If no chance of heavyweight readers, do it the hard way.
		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running. However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		// Check for "running" idle tasks on offline CPUs.
		if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
			n_heavy_reader_ofl_updates++;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	if (!nesting) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		return 0; // In QS, so done.
	}
	if (nesting < 0)
		return -EINVAL; // Reader transitioning, try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will update state upon exit from that critical
	// section.
	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
		trc_add_holdout(t, bhp);
	return 0;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, bhp)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section. Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct(). Either way, the task cannot be freed out
	// from under this code.

	// If the task is currently running, send an IPI; either way, add
	// it to the holdout list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}
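
// Note that the time_after() check above means that IPIs are sent only
// once the current grace period is at least rcu_task_ipi_delay jiffies
// old. For example, if rcu_task_ipi_delay were HZ / 2 with HZ=1000, no
// IPIs would be issued during roughly the first 500 milliseconds of a
// grace period, giving running readers time to reach a quiescent state
// on their own.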

/*
 * Initialize for first-round processing for the specified task.
 * Return false if task is NULL or already taken care of, true otherwise.
 */
static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs. Also, the grace-period
	// kthread is always in a quiescent state. In addition, just return
	// if this task is already on the list.
	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
		return false;

	rcu_st_need_qs(t, 0);
	t->trc_ipi_to_cpu = -1;
	return true;
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
{
	if (rcu_tasks_trace_pertask_prep(t, true))
		trc_wait_for_one_reader(t, hop);
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(struct list_head *hop)
{
	LIST_HEAD(blkd_tasks);
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t;

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the CPU scan for the benefit of
	// any IPIs that might be needed. This also waits for all readers
	// in CPU-hotplug code paths.
	cpus_read_lock();

	// These rcu_tasks_trace_pertask_prep() calls are serialized to
	// allow safe access to the hop list.
	for_each_online_cpu(cpu) {
		rcu_read_lock();
		// Note that cpu_curr_snapshot() picks up the target
		// CPU's current task while its runqueue is locked with
		// an smp_mb__after_spinlock(). This ensures that either
		// the grace-period kthread will see that task's read-side
		// critical section or the task will see the updater's pre-GP
		// accesses. The trailing smp_mb() in cpu_curr_snapshot()
		// does not currently play a role other than simplify
		// that function's ordering semantics. If these simplified
		// ordering semantics continue to be redundant, that smp_mb()
		// might be removed.
		t = cpu_curr_snapshot(cpu);
		if (rcu_tasks_trace_pertask_prep(t, true))
			trc_add_holdout(t, hop);
		rcu_read_unlock();
		cond_resched_tasks_rcu_qs();
	}

	// Only after all running tasks have been accounted for is it
	// safe to take care of the tasks that have blocked within their
	// current RCU tasks trace read-side critical section.
	for_each_possible_cpu(cpu) {
		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
		while (!list_empty(&blkd_tasks)) {
			rcu_read_lock();
			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
			list_del_init(&t->trc_blkd_node);
			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
			rcu_tasks_trace_pertask(t, hop);
			rcu_read_unlock();
			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		}
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list is populated.
	cpus_read_unlock();
}

/*
 * Do intermediate processing between task and holdout scans.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().

	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
	synchronize_rcu();
	// Any tasks that exit after this point will set
	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
}

/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {
	int nesting;
	int ipi_to_cpu;
	u8 needqs;
};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{
	struct trc_stall_chk_rdr *trc_rdrp = arg;

	if (task_curr(t) && cpu_online(task_cpu(t)))
		return false; // It is running, so decline to inspect it.
	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
	trc_rdrp->needqs = rcu_ld_need_qs(t);
	return true;
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;
	struct trc_stall_chk_rdr trc_rdr;
	bool is_idle_tsk = is_idle_task(t);

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c%c\n",
			 t->pid,
			 ".I"[t->trc_ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
			 trc_rdr.nesting,
			 " !CN"[trc_rdr.needqs & 0x3],
			 " ?"[trc_rdr.needqs > 0x3],
			 cpu, cpu_online(cpu) ? "" : "(offline)");
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan for IPIs.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;

	// Wait for any lingering IPI handlers to complete. Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order. If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	union rcu_special trs = READ_ONCE(t->trc_reader_special);

	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
		rcu_read_unlock_trace_special(t);
	else
		WRITE_ONCE(t->trc_reader_nesting, 0);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
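
// Illustrative update-side sketch (not part of this file; gp, newp, oldp,
// ->rh, and release_foo() are made-up stand-ins): once an object has been
// unpublished, either wait synchronously or queue a callback:
//
//	rcu_assign_pointer(gp, newp);		// Unpublish oldp.
//	synchronize_rcu_tasks_trace();		// Wait for pre-existing readers.
//	kfree(oldp);				// Nothing can now reference oldp.
//
// or, to avoid blocking:
//
//	rcu_assign_pointer(gp, newp);
//	call_rcu_tasks_trace(&oldp->rh, release_foo);	// ->rh is a struct rcu_head.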
2055 */ 2056 void rcu_barrier_tasks_trace(void) 2057 { 2058 rcu_barrier_tasks_generic(&rcu_tasks_trace); 2059 } 2060 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); 2061 2062 int rcu_tasks_trace_lazy_ms = -1; 2063 module_param(rcu_tasks_trace_lazy_ms, int, 0444); 2064 2065 static int __init rcu_spawn_tasks_trace_kthread(void) 2066 { 2067 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { 2068 rcu_tasks_trace.gp_sleep = HZ / 10; 2069 rcu_tasks_trace.init_fract = HZ / 10; 2070 } else { 2071 rcu_tasks_trace.gp_sleep = HZ / 200; 2072 if (rcu_tasks_trace.gp_sleep <= 0) 2073 rcu_tasks_trace.gp_sleep = 1; 2074 rcu_tasks_trace.init_fract = HZ / 200; 2075 if (rcu_tasks_trace.init_fract <= 0) 2076 rcu_tasks_trace.init_fract = 1; 2077 } 2078 if (rcu_tasks_trace_lazy_ms >= 0) 2079 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms); 2080 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; 2081 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; 2082 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; 2083 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; 2084 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); 2085 return 0; 2086 } 2087 2088 #if !defined(CONFIG_TINY_RCU) 2089 void show_rcu_tasks_trace_gp_kthread(void) 2090 { 2091 char buf[64]; 2092 2093 snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu", 2094 data_race(n_trc_holdouts), 2095 data_race(n_heavy_reader_ofl_updates), 2096 data_race(n_heavy_reader_updates), 2097 data_race(n_heavy_reader_attempts)); 2098 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); 2099 } 2100 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); 2101 2102 void rcu_tasks_trace_torture_stats_print(char *tt, char *tf) 2103 { 2104 rcu_tasks_torture_stats_print_generic(&rcu_tasks_trace, tt, tf, ""); 2105 } 2106 EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print); 2107 #endif // !defined(CONFIG_TINY_RCU) 2108 2109 struct task_struct *get_rcu_tasks_trace_gp_kthread(void) 2110 { 2111 return rcu_tasks_trace.kthread_ptr; 2112 } 2113 EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); 2114 2115 void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq) 2116 { 2117 *flags = 0; 2118 *gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq); 2119 } 2120 EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data); 2121 2122 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ 2123 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } 2124 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ 2125 2126 #ifndef CONFIG_TINY_RCU 2127 void show_rcu_tasks_gp_kthreads(void) 2128 { 2129 show_rcu_tasks_classic_gp_kthread(); 2130 show_rcu_tasks_rude_gp_kthread(); 2131 show_rcu_tasks_trace_gp_kthread(); 2132 } 2133 #endif /* #ifndef CONFIG_TINY_RCU */ 2134 2135 #ifdef CONFIG_PROVE_RCU 2136 struct rcu_tasks_test_desc { 2137 struct rcu_head rh; 2138 const char *name; 2139 bool notrun; 2140 unsigned long runstart; 2141 }; 2142 2143 static struct rcu_tasks_test_desc tests[] = { 2144 { 2145 .name = "call_rcu_tasks()", 2146 /* If not defined, the test is skipped. */ 2147 .notrun = IS_ENABLED(CONFIG_TASKS_RCU), 2148 }, 2149 { 2150 .name = "call_rcu_tasks_trace()", 2151 /* If not defined, the test is skipped. 

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
	unsigned long runstart;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = false;
}
#endif // #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

static void rcu_tasks_initiate_self_tests(void)
{
#ifdef CONFIG_TASKS_RCU
	pr_info("Running RCU Tasks wait API self tests\n");
	tests[0].runstart = jiffies;
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("Running RCU Tasks Rude wait API self tests\n");
	synchronize_rcu_tasks_rude();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("Running RCU Tasks Trace wait API self tests\n");
	tests[1].runstart = jiffies;
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[1].rh, test_rcu_tasks_callback);
#endif
}

/*
 * Return:  0 - test passed
 *	    1 - test failed, but have not timed out yet
 *	   -1 - test failed and timed out
 */
static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;
	unsigned long bst = rcu_task_stall_timeout;

	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		while (tests[i].notrun) {	// still hanging.
			if (time_after(jiffies, tests[i].runstart + bst)) {
				pr_err("%s has failed boot-time tests.\n", tests[i].name);
				ret = -1;
				break;
			}
			ret = 1;
			break;
		}
	}
	WARN_ON(ret < 0);

	return ret;
}
2222 */ 2223 static struct delayed_work rcu_tasks_verify_work; 2224 static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused) 2225 { 2226 int ret = rcu_tasks_verify_self_tests(); 2227 2228 if (ret <= 0) 2229 return; 2230 2231 /* Test fails but not timed out yet, reschedule another check */ 2232 schedule_delayed_work(&rcu_tasks_verify_work, HZ); 2233 } 2234 2235 static int rcu_tasks_verify_schedule_work(void) 2236 { 2237 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn); 2238 rcu_tasks_verify_work_fn(NULL); 2239 return 0; 2240 } 2241 late_initcall(rcu_tasks_verify_schedule_work); 2242 #else /* #ifdef CONFIG_PROVE_RCU */ 2243 static void rcu_tasks_initiate_self_tests(void) { } 2244 #endif /* #else #ifdef CONFIG_PROVE_RCU */ 2245 2246 void __init tasks_cblist_init_generic(void) 2247 { 2248 lockdep_assert_irqs_disabled(); 2249 WARN_ON(num_online_cpus() > 1); 2250 2251 #ifdef CONFIG_TASKS_RCU 2252 cblist_init_generic(&rcu_tasks); 2253 #endif 2254 2255 #ifdef CONFIG_TASKS_RUDE_RCU 2256 cblist_init_generic(&rcu_tasks_rude); 2257 #endif 2258 2259 #ifdef CONFIG_TASKS_TRACE_RCU 2260 cblist_init_generic(&rcu_tasks_trace); 2261 #endif 2262 } 2263 2264 void __init rcu_init_tasks_generic(void) 2265 { 2266 #ifdef CONFIG_TASKS_RCU 2267 rcu_spawn_tasks_kthread(); 2268 #endif 2269 2270 #ifdef CONFIG_TASKS_RUDE_RCU 2271 rcu_spawn_tasks_rude_kthread(); 2272 #endif 2273 2274 #ifdef CONFIG_TASKS_TRACE_RCU 2275 rcu_spawn_tasks_trace_kthread(); 2276 #endif 2277 2278 // Run the self-tests. 2279 rcu_tasks_initiate_self_tests(); 2280 } 2281 2282 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ 2283 static inline void rcu_tasks_bootup_oddness(void) {} 2284 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ 2285