// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
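
/*
 * Descriptive note (based on the code in this file): reader-extension state
 * travels in a single int.  The low-order RCUTORTURE_RDR_NBITS bits record
 * which protections (bh, irq, preempt, rcu_read_lock_bh(),
 * rcu_read_lock_sched(), nested RCU readers) are currently held, while the
 * SRCU read-lock indexes, when needed, are parked above them via
 * RCUTORTURE_RDR_SHIFT_1 and RCUTORTURE_RDR_SHIFT_2.  See
 * rcutorture_one_extend() for how these bits are applied and removed.
 */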

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
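
/*
 * All of the above are ordinary module parameters, so they can be supplied
 * at modprobe time or, when rcutorture is built in, on the kernel command
 * line using the "rcutorture." prefix.  For example (illustrative values
 * only, not a recommendation):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=15
 *
 * or boot with:
 *
 *	rcutorture.torture_type=srcu rcutorture.nreaders=8
 */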
"Number of seconds between stats printk()s"); 119 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 120 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 121 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds."); 122 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds."); 123 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable."); 124 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs"); 125 torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario."); 126 torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); 127 128 static char *torture_type = "rcu"; 129 module_param(torture_type, charp, 0444); 130 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 131 132 static int nrealnocbers; 133 static int nrealreaders; 134 static struct task_struct *writer_task; 135 static struct task_struct **fakewriter_tasks; 136 static struct task_struct **reader_tasks; 137 static struct task_struct **nocb_tasks; 138 static struct task_struct *stats_task; 139 static struct task_struct *fqs_task; 140 static struct task_struct *boost_tasks[NR_CPUS]; 141 static struct task_struct *stall_task; 142 static struct task_struct **fwd_prog_tasks; 143 static struct task_struct **barrier_cbs_tasks; 144 static struct task_struct *barrier_task; 145 static struct task_struct *read_exit_task; 146 147 #define RCU_TORTURE_PIPE_LEN 10 148 149 // Mailbox-like structure to check RCU global memory ordering. 150 struct rcu_torture_reader_check { 151 unsigned long rtc_myloops; 152 int rtc_chkrdr; 153 unsigned long rtc_chkloops; 154 int rtc_ready; 155 struct rcu_torture_reader_check *rtc_assigner; 156 } ____cacheline_internodealigned_in_smp; 157 158 // Update-side data structure used to check RCU readers. 159 struct rcu_torture { 160 struct rcu_head rtort_rcu; 161 int rtort_pipe_count; 162 struct list_head rtort_free; 163 int rtort_mbtest; 164 struct rcu_torture_reader_check *rtort_chkp; 165 }; 166 167 static LIST_HEAD(rcu_torture_freelist); 168 static struct rcu_torture __rcu *rcu_torture_current; 169 static unsigned long rcu_torture_current_version; 170 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 171 static DEFINE_SPINLOCK(rcu_torture_lock); 172 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 173 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 174 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 175 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 176 static atomic_t n_rcu_torture_alloc; 177 static atomic_t n_rcu_torture_alloc_fail; 178 static atomic_t n_rcu_torture_free; 179 static atomic_t n_rcu_torture_mberror; 180 static atomic_t n_rcu_torture_mbchk_fail; 181 static atomic_t n_rcu_torture_mbchk_tries; 182 static atomic_t n_rcu_torture_error; 183 static long n_rcu_torture_barrier_error; 184 static long n_rcu_torture_boost_ktrerror; 185 static long n_rcu_torture_boost_failure; 186 static long n_rcu_torture_boosts; 187 static atomic_long_t n_rcu_torture_timers; 188 static long n_barrier_attempts; 189 static long n_barrier_successes; /* did rcu_barrier test succeed? 
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */
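
/*
 * Descriptive note (based on the code below): each rcu_torture element
 * cycles through a simple pipeline.  It is pulled off rcu_torture_freelist,
 * published as rcu_torture_current by the writer, and then retired through
 * a series of grace periods while readers may still hold references, with
 * rtort_pipe_count recording how far along it is.  Once the count reaches
 * RCU_TORTURE_PIPE_LEN, rcu_torture_pipe_update_one() clears rtort_mbtest
 * and the element is returned to the freelist.
 */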

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_gp_completed)(void);
	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
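
/*
 * Descriptive note (based on the ops vectors below): each flavor fills in
 * only the operations it actually supports and leaves the rest NULL, and
 * callers such as rcu_torture_write_types() check for NULL members before
 * use.  Adding a new flavor is therefore mostly a matter of supplying
 * another rcu_torture_ops structure and registering it where torture_type
 * is resolved (outside this excerpt).
 */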

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.same_gp_state		= same_state_synchronize_rcu,
	.same_gp_state_full	= same_state_synchronize_rcu_full,
	.get_comp_state		= get_completed_synchronize_rcu,
	.get_comp_state_full	= get_completed_synchronize_rcu_full,
	.get_gp_state		= get_state_synchronize_rcu,
	.get_gp_state_full	= get_state_synchronize_rcu_full,
	.get_gp_completed	= get_completed_synchronize_rcu,
	.get_gp_completed_full	= get_completed_synchronize_rcu_full,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
	.poll_need_2gp		= rcu_poll_need_2gp,
	.cond_sync		= cond_synchronize_rcu,
	.cond_sync_full		= cond_synchronize_rcu_full,
	.get_gp_state_exp	= get_state_synchronize_rcu,
	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp	= poll_state_synchronize_rcu,
	.cond_sync_exp		= cond_synchronize_rcu_expedited,
	.call			= call_rcu_hurry,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.stats			= NULL,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.name			= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static int srcu_torture_read_lock(void)
{
	if (cur_ops == &srcud_ops)
		return srcu_read_lock_nmisafe(srcu_ctlp);
	else
		return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	if (cur_ops == &srcud_ops)
		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
	else
		srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcu"
};
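
/*
 * The "srcu" flavor above exercises the statically allocated srcu_ctl,
 * while the "srcud" flavor below points srcu_ctlp at the dynamically
 * initialized srcu_ctld, thereby also exercising init_srcu_struct() and
 * cleanup_srcu_struct().  Note that srcu_torture_read_lock() uses the
 * NMI-safe SRCU readers only for the srcud flavor.
 */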

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};
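
/*
 * Descriptive note (based on the code above): the "trivial" flavor relies
 * on the classic toy-RCU argument.  Its readers simply disable preemption,
 * so once synchronize_rcu_trivial() has run on every online CPU (by
 * repeatedly setting affinity and verifying placement), every reader that
 * began before the call must have finished.  That is why it is only
 * meaningful for CONFIG_PREEMPT=n kernels, per the header comment above.
 */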
850 */ 851 852 static int tasks_torture_read_lock(void) 853 { 854 return 0; 855 } 856 857 static void tasks_torture_read_unlock(int idx) 858 { 859 } 860 861 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 862 { 863 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 864 } 865 866 static void synchronize_rcu_mult_test(void) 867 { 868 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry); 869 } 870 871 static struct rcu_torture_ops tasks_ops = { 872 .ttype = RCU_TASKS_FLAVOR, 873 .init = rcu_sync_torture_init, 874 .readlock = tasks_torture_read_lock, 875 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 876 .readunlock = tasks_torture_read_unlock, 877 .get_gp_seq = rcu_no_completed, 878 .deferred_free = rcu_tasks_torture_deferred_free, 879 .sync = synchronize_rcu_tasks, 880 .exp_sync = synchronize_rcu_mult_test, 881 .call = call_rcu_tasks, 882 .cb_barrier = rcu_barrier_tasks, 883 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 884 .fqs = NULL, 885 .stats = NULL, 886 .irq_capable = 1, 887 .slow_gps = 1, 888 .name = "tasks" 889 }; 890 891 #define TASKS_OPS &tasks_ops, 892 893 #else // #ifdef CONFIG_TASKS_RCU 894 895 #define TASKS_OPS 896 897 #endif // #else #ifdef CONFIG_TASKS_RCU 898 899 900 #ifdef CONFIG_TASKS_RUDE_RCU 901 902 /* 903 * Definitions for rude RCU-tasks torture testing. 904 */ 905 906 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 907 { 908 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 909 } 910 911 static struct rcu_torture_ops tasks_rude_ops = { 912 .ttype = RCU_TASKS_RUDE_FLAVOR, 913 .init = rcu_sync_torture_init, 914 .readlock = rcu_torture_read_lock_trivial, 915 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 916 .readunlock = rcu_torture_read_unlock_trivial, 917 .get_gp_seq = rcu_no_completed, 918 .deferred_free = rcu_tasks_rude_torture_deferred_free, 919 .sync = synchronize_rcu_tasks_rude, 920 .exp_sync = synchronize_rcu_tasks_rude, 921 .call = call_rcu_tasks_rude, 922 .cb_barrier = rcu_barrier_tasks_rude, 923 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 924 .cbflood_max = 50000, 925 .fqs = NULL, 926 .stats = NULL, 927 .irq_capable = 1, 928 .name = "tasks-rude" 929 }; 930 931 #define TASKS_RUDE_OPS &tasks_rude_ops, 932 933 #else // #ifdef CONFIG_TASKS_RUDE_RCU 934 935 #define TASKS_RUDE_OPS 936 937 #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU 938 939 940 #ifdef CONFIG_TASKS_TRACE_RCU 941 942 /* 943 * Definitions for tracing RCU-tasks torture testing. 944 */ 945 946 static int tasks_tracing_torture_read_lock(void) 947 { 948 rcu_read_lock_trace(); 949 return 0; 950 } 951 952 static void tasks_tracing_torture_read_unlock(int idx) 953 { 954 rcu_read_unlock_trace(); 955 } 956 957 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 958 { 959 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 960 } 961 962 static struct rcu_torture_ops tasks_tracing_ops = { 963 .ttype = RCU_TASKS_TRACING_FLAVOR, 964 .init = rcu_sync_torture_init, 965 .readlock = tasks_tracing_torture_read_lock, 966 .read_delay = srcu_read_delay, /* just reuse srcu's version. 
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
"ended already" : "still pending"); 1067 1068 } 1069 1070 return true; // failed 1071 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { 1072 *start = jiffies; 1073 } 1074 1075 return false; // passed 1076 } 1077 1078 static int rcu_torture_boost(void *arg) 1079 { 1080 unsigned long endtime; 1081 unsigned long gp_state; 1082 unsigned long gp_state_time; 1083 unsigned long oldstarttime; 1084 1085 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 1086 1087 /* Set real-time priority. */ 1088 sched_set_fifo_low(current); 1089 1090 /* Each pass through the following loop does one boost-test cycle. */ 1091 do { 1092 bool failed = false; // Test failed already in this test interval 1093 bool gp_initiated = false; 1094 1095 if (kthread_should_stop()) 1096 goto checkwait; 1097 1098 /* Wait for the next test interval. */ 1099 oldstarttime = READ_ONCE(boost_starttime); 1100 while (time_before(jiffies, oldstarttime)) { 1101 schedule_timeout_interruptible(oldstarttime - jiffies); 1102 if (stutter_wait("rcu_torture_boost")) 1103 sched_set_fifo_low(current); 1104 if (torture_must_stop()) 1105 goto checkwait; 1106 } 1107 1108 // Do one boost-test interval. 1109 endtime = oldstarttime + test_boost_duration * HZ; 1110 while (time_before(jiffies, endtime)) { 1111 // Has current GP gone too long? 1112 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1113 failed = rcu_torture_boost_failed(gp_state, &gp_state_time); 1114 // If we don't have a grace period in flight, start one. 1115 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { 1116 gp_state = cur_ops->start_gp_poll(); 1117 gp_initiated = true; 1118 gp_state_time = jiffies; 1119 } 1120 if (stutter_wait("rcu_torture_boost")) { 1121 sched_set_fifo_low(current); 1122 // If the grace period already ended, 1123 // we don't know when that happened, so 1124 // start over. 1125 if (cur_ops->poll_gp_state(gp_state)) 1126 gp_initiated = false; 1127 } 1128 if (torture_must_stop()) 1129 goto checkwait; 1130 } 1131 1132 // In case the grace period extended beyond the end of the loop. 1133 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1134 rcu_torture_boost_failed(gp_state, &gp_state_time); 1135 1136 /* 1137 * Set the start time of the next test interval. 1138 * Yes, this is vulnerable to long delays, but such 1139 * delays simply cause a false negative for the next 1140 * interval. Besides, we are running at RT priority, 1141 * so delays should be relatively rare. 1142 */ 1143 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { 1144 if (mutex_trylock(&boost_mutex)) { 1145 if (oldstarttime == boost_starttime) { 1146 WRITE_ONCE(boost_starttime, 1147 jiffies + test_boost_interval * HZ); 1148 n_rcu_torture_boosts++; 1149 } 1150 mutex_unlock(&boost_mutex); 1151 break; 1152 } 1153 schedule_timeout_uninterruptible(HZ / 20); 1154 } 1155 1156 /* Go do the stutter. */ 1157 checkwait: if (stutter_wait("rcu_torture_boost")) 1158 sched_set_fifo_low(current); 1159 } while (!torture_must_stop()); 1160 1161 /* Clean up and exit. */ 1162 while (!kthread_should_stop()) { 1163 torture_shutdown_absorb("rcu_torture_boost"); 1164 schedule_timeout_uninterruptible(HZ / 20); 1165 } 1166 torture_kthread_stopping("rcu_torture_boost"); 1167 return 0; 1168 } 1169 1170 /* 1171 * RCU torture force-quiescent-state kthread. Repeatedly induces 1172 * bursts of calls to force_quiescent_state(), increasing the probability 1173 * of occurrence of some important types of race conditions. 

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
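
/*
 * Note that when none of the gp_* module parameters are set, the code above
 * enables every update-side variant that cur_ops supports, and both
 * rcu_torture_writer() and rcu_torture_fakewriter() then pick among the
 * resulting synctype[] entries at random on each pass.
 */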
1317 */ 1318 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) 1319 { 1320 unsigned long cookie; 1321 struct rcu_gp_oldstate cookie_full; 1322 bool dopoll; 1323 bool dopoll_full; 1324 unsigned long r = torture_random(trsp); 1325 1326 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); 1327 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); 1328 if (dopoll || dopoll_full) 1329 cpus_read_lock(); 1330 if (dopoll) 1331 cookie = cur_ops->get_gp_state(); 1332 if (dopoll_full) 1333 cur_ops->get_gp_state_full(&cookie_full); 1334 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) 1335 sync(); 1336 sync(); 1337 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), 1338 "%s: Cookie check 3 failed %pS() online %*pbl.", 1339 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1340 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), 1341 "%s: Cookie check 4 failed %pS() online %*pbl", 1342 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1343 if (dopoll || dopoll_full) 1344 cpus_read_unlock(); 1345 } 1346 1347 /* 1348 * RCU torture writer kthread. Repeatedly substitutes a new structure 1349 * for that pointed to by rcu_torture_current, freeing the old structure 1350 * after a series of grace periods (the "pipeline"). 1351 */ 1352 static int 1353 rcu_torture_writer(void *arg) 1354 { 1355 bool boot_ended; 1356 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1357 unsigned long cookie; 1358 struct rcu_gp_oldstate cookie_full; 1359 int expediting = 0; 1360 unsigned long gp_snap; 1361 unsigned long gp_snap1; 1362 struct rcu_gp_oldstate gp_snap_full; 1363 struct rcu_gp_oldstate gp_snap1_full; 1364 int i; 1365 int idx; 1366 int oldnice = task_nice(current); 1367 struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE]; 1368 struct rcu_torture *rp; 1369 struct rcu_torture *old_rp; 1370 static DEFINE_TORTURE_RANDOM(rand); 1371 unsigned long stallsdone = jiffies; 1372 bool stutter_waited; 1373 unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE]; 1374 1375 // If a new stall test is added, this must be adjusted. 1376 if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu) 1377 stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ; 1378 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1379 if (!can_expedite) 1380 pr_alert("%s" TORTURE_FLAG 1381 " GP expediting controlled from boot/sysfs for %s.\n", 1382 torture_type, cur_ops->name); 1383 if (WARN_ONCE(nsynctypes == 0, 1384 "%s: No update-side primitives.\n", __func__)) { 1385 /* 1386 * No updates primitives, so don't try updating. 1387 * The resulting test won't be testing much, hence the 1388 * above WARN_ONCE(). 
1389 */ 1390 rcu_torture_writer_state = RTWS_STOPPING; 1391 torture_kthread_stopping("rcu_torture_writer"); 1392 return 0; 1393 } 1394 1395 do { 1396 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1397 torture_hrtimeout_us(500, 1000, &rand); 1398 rp = rcu_torture_alloc(); 1399 if (rp == NULL) 1400 continue; 1401 rp->rtort_pipe_count = 0; 1402 rcu_torture_writer_state = RTWS_DELAY; 1403 udelay(torture_random(&rand) & 0x3ff); 1404 rcu_torture_writer_state = RTWS_REPLACE; 1405 old_rp = rcu_dereference_check(rcu_torture_current, 1406 current == writer_task); 1407 rp->rtort_mbtest = 1; 1408 rcu_assign_pointer(rcu_torture_current, rp); 1409 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1410 if (old_rp) { 1411 i = old_rp->rtort_pipe_count; 1412 if (i > RCU_TORTURE_PIPE_LEN) 1413 i = RCU_TORTURE_PIPE_LEN; 1414 atomic_inc(&rcu_torture_wcount[i]); 1415 WRITE_ONCE(old_rp->rtort_pipe_count, 1416 old_rp->rtort_pipe_count + 1); 1417 1418 // Make sure readers block polled grace periods. 1419 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1420 idx = cur_ops->readlock(); 1421 cookie = cur_ops->get_gp_state(); 1422 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1423 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1424 __func__, 1425 rcu_torture_writer_state_getname(), 1426 rcu_torture_writer_state, 1427 cookie, cur_ops->get_gp_state()); 1428 if (cur_ops->get_gp_completed) { 1429 cookie = cur_ops->get_gp_completed(); 1430 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1431 } 1432 cur_ops->readunlock(idx); 1433 } 1434 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1435 idx = cur_ops->readlock(); 1436 cur_ops->get_gp_state_full(&cookie_full); 1437 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1438 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1439 __func__, 1440 rcu_torture_writer_state_getname(), 1441 rcu_torture_writer_state, 1442 cpumask_pr_args(cpu_online_mask)); 1443 if (cur_ops->get_gp_completed_full) { 1444 cur_ops->get_gp_completed_full(&cookie_full); 1445 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1446 } 1447 cur_ops->readunlock(idx); 1448 } 1449 switch (synctype[torture_random(&rand) % nsynctypes]) { 1450 case RTWS_DEF_FREE: 1451 rcu_torture_writer_state = RTWS_DEF_FREE; 1452 cur_ops->deferred_free(old_rp); 1453 break; 1454 case RTWS_EXP_SYNC: 1455 rcu_torture_writer_state = RTWS_EXP_SYNC; 1456 do_rtws_sync(&rand, cur_ops->exp_sync); 1457 rcu_torture_pipe_update(old_rp); 1458 break; 1459 case RTWS_COND_GET: 1460 rcu_torture_writer_state = RTWS_COND_GET; 1461 gp_snap = cur_ops->get_gp_state(); 1462 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1463 rcu_torture_writer_state = RTWS_COND_SYNC; 1464 cur_ops->cond_sync(gp_snap); 1465 rcu_torture_pipe_update(old_rp); 1466 break; 1467 case RTWS_COND_GET_EXP: 1468 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1469 gp_snap = cur_ops->get_gp_state_exp(); 1470 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1471 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1472 cur_ops->cond_sync_exp(gp_snap); 1473 rcu_torture_pipe_update(old_rp); 1474 break; 1475 case RTWS_COND_GET_FULL: 1476 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1477 cur_ops->get_gp_state_full(&gp_snap_full); 1478 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1479 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1480 cur_ops->cond_sync_full(&gp_snap_full); 1481 rcu_torture_pipe_update(old_rp); 1482 break; 1483 case RTWS_COND_GET_EXP_FULL: 1484 rcu_torture_writer_state = 
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
				cur_ops->cond_sync_exp_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				for (i = 0; i < ARRAY_SIZE(ulo); i++)
					ulo[i] = cur_ops->get_comp_state();
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap)) {
					gp_snap1 = cur_ops->get_gp_state();
					for (i = 0; i < ARRAY_SIZE(ulo); i++)
						if (cur_ops->poll_gp_state(ulo[i]) ||
						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
							ulo[i] = gp_snap1;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
				for (i = 0; i < ARRAY_SIZE(rgo); i++)
					cur_ops->get_comp_state_full(&rgo[i]);
				cur_ops->start_gp_poll_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
					cur_ops->get_gp_state_full(&gp_snap1_full);
					for (i = 0; i < ARRAY_SIZE(rgo); i++)
						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
						    cur_ops->same_gp_state_full(&rgo[i],
										&gp_snap1_full)) {
							rgo[i] = gp_snap1_full;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
				gp_snap = cur_ops->start_gp_poll_exp();
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
				while (!cur_ops->poll_gp_state_exp(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				do_rtws_sync(&rand, cur_ops->sync);
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !atomic_read(&rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended &&
		    time_after(jiffies, stallsdone))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
					tracing_off();
					show_rcu_gp_kthreads();
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
					rcu_ftrace_dump(DUMP_ALL);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	struct rcu_gp_oldstate gp_snap_full;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
1632 */ 1633 torture_kthread_stopping("rcu_torture_fakewriter"); 1634 return 0; 1635 } 1636 1637 do { 1638 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1639 if (cur_ops->cb_barrier != NULL && 1640 torture_random(&rand) % (nfakewriters * 8) == 0) { 1641 cur_ops->cb_barrier(); 1642 } else { 1643 switch (synctype[torture_random(&rand) % nsynctypes]) { 1644 case RTWS_DEF_FREE: 1645 break; 1646 case RTWS_EXP_SYNC: 1647 cur_ops->exp_sync(); 1648 break; 1649 case RTWS_COND_GET: 1650 gp_snap = cur_ops->get_gp_state(); 1651 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1652 cur_ops->cond_sync(gp_snap); 1653 break; 1654 case RTWS_COND_GET_EXP: 1655 gp_snap = cur_ops->get_gp_state_exp(); 1656 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1657 cur_ops->cond_sync_exp(gp_snap); 1658 break; 1659 case RTWS_COND_GET_FULL: 1660 cur_ops->get_gp_state_full(&gp_snap_full); 1661 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1662 cur_ops->cond_sync_full(&gp_snap_full); 1663 break; 1664 case RTWS_COND_GET_EXP_FULL: 1665 cur_ops->get_gp_state_full(&gp_snap_full); 1666 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1667 cur_ops->cond_sync_exp_full(&gp_snap_full); 1668 break; 1669 case RTWS_POLL_GET: 1670 gp_snap = cur_ops->start_gp_poll(); 1671 while (!cur_ops->poll_gp_state(gp_snap)) { 1672 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1673 &rand); 1674 } 1675 break; 1676 case RTWS_POLL_GET_FULL: 1677 cur_ops->start_gp_poll_full(&gp_snap_full); 1678 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1679 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1680 &rand); 1681 } 1682 break; 1683 case RTWS_POLL_GET_EXP: 1684 gp_snap = cur_ops->start_gp_poll_exp(); 1685 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1686 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1687 &rand); 1688 } 1689 break; 1690 case RTWS_POLL_GET_EXP_FULL: 1691 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1692 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1693 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1694 &rand); 1695 } 1696 break; 1697 case RTWS_SYNC: 1698 cur_ops->sync(); 1699 break; 1700 default: 1701 WARN_ON_ONCE(1); 1702 break; 1703 } 1704 } 1705 stutter_wait("rcu_torture_fakewriter"); 1706 } while (!torture_must_stop()); 1707 1708 torture_kthread_stopping("rcu_torture_fakewriter"); 1709 return 0; 1710 } 1711 1712 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1713 { 1714 kfree(rhp); 1715 } 1716 1717 // Set up and carry out testing of RCU's global memory ordering 1718 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1719 struct torture_random_state *trsp) 1720 { 1721 unsigned long loops; 1722 int noc = torture_num_online_cpus(); 1723 int rdrchked; 1724 int rdrchker; 1725 struct rcu_torture_reader_check *rtrcp; // Me. 1726 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1727 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1728 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1729 1730 if (myid < 0) 1731 return; // Don't try this from timer handlers. 1732 1733 // Increment my counter. 1734 rtrcp = &rcu_torture_reader_mbchk[myid]; 1735 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1736 1737 // Attempt to assign someone else some checking work. 
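// A hand-off succeeds only if the chosen checker has no pending assignment and this rcu_torture structure has no checker attached; otherwise the cmpxchg() sequence below quietly backs the attempt out.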
1738 rdrchked = torture_random(trsp) % nrealreaders; 1739 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1740 rdrchker = torture_random(trsp) % nrealreaders; 1741 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1742 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1743 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1744 !READ_ONCE(rtp->rtort_chkp) && 1745 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1746 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1747 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1748 rtrcp->rtc_chkrdr = rdrchked; 1749 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1750 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1751 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1752 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1753 } 1754 1755 // If assigned some completed work, do it! 1756 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1757 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1758 return; // No work or work not yet ready. 1759 rdrchked = rtrcp_assigner->rtc_chkrdr; 1760 if (WARN_ON_ONCE(rdrchked < 0)) 1761 return; 1762 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1763 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1764 atomic_inc(&n_rcu_torture_mbchk_tries); 1765 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1766 atomic_inc(&n_rcu_torture_mbchk_fail); 1767 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1768 rtrcp_assigner->rtc_ready = 0; 1769 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1770 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1771 } 1772 1773 /* 1774 * Do one extension of an RCU read-side critical section using the 1775 * current reader state in readstate (set to zero for initial entry 1776 * to extended critical section), set the new state as specified by 1777 * newstate (set to zero for final exit from extended critical section), 1778 * and random-number-generator state in trsp. If this is neither the 1779 * beginning or end of the critical section and if there was actually a 1780 * change, do a ->read_delay(). 1781 */ 1782 static void rcutorture_one_extend(int *readstate, int newstate, 1783 struct torture_random_state *trsp, 1784 struct rt_read_seg *rtrsp) 1785 { 1786 unsigned long flags; 1787 int idxnew1 = -1; 1788 int idxnew2 = -1; 1789 int idxold1 = *readstate; 1790 int idxold2 = idxold1; 1791 int statesnew = ~*readstate & newstate; 1792 int statesold = *readstate & ~newstate; 1793 1794 WARN_ON_ONCE(idxold2 < 0); 1795 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1796 rtrsp->rt_readstate = newstate; 1797 1798 /* First, put new protection in place to avoid critical-section gap. 
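 * The index returned by ->readlock() is stashed in the upper bits of the reader state so that it can be handed back to ->readunlock() on exit.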
*/ 1799 if (statesnew & RCUTORTURE_RDR_BH) 1800 local_bh_disable(); 1801 if (statesnew & RCUTORTURE_RDR_RBH) 1802 rcu_read_lock_bh(); 1803 if (statesnew & RCUTORTURE_RDR_IRQ) 1804 local_irq_disable(); 1805 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1806 preempt_disable(); 1807 if (statesnew & RCUTORTURE_RDR_SCHED) 1808 rcu_read_lock_sched(); 1809 if (statesnew & RCUTORTURE_RDR_RCU_1) 1810 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; 1811 if (statesnew & RCUTORTURE_RDR_RCU_2) 1812 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; 1813 1814 /* 1815 * Next, remove old protection, in decreasing order of strength 1816 * to avoid unlock paths that aren't safe in the stronger 1817 * context. Namely: BH cannot be enabled with interrupts disabled. 1818 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible 1819 * context. 1820 */ 1821 if (statesold & RCUTORTURE_RDR_IRQ) 1822 local_irq_enable(); 1823 if (statesold & RCUTORTURE_RDR_PREEMPT) 1824 preempt_enable(); 1825 if (statesold & RCUTORTURE_RDR_SCHED) 1826 rcu_read_unlock_sched(); 1827 if (statesold & RCUTORTURE_RDR_BH) 1828 local_bh_enable(); 1829 if (statesold & RCUTORTURE_RDR_RBH) 1830 rcu_read_unlock_bh(); 1831 if (statesold & RCUTORTURE_RDR_RCU_2) { 1832 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); 1833 WARN_ON_ONCE(idxnew2 != -1); 1834 idxold2 = 0; 1835 } 1836 if (statesold & RCUTORTURE_RDR_RCU_1) { 1837 bool lockit; 1838 1839 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 1840 if (lockit) 1841 raw_spin_lock_irqsave(&current->pi_lock, flags); 1842 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); 1843 WARN_ON_ONCE(idxnew1 != -1); 1844 idxold1 = 0; 1845 if (lockit) 1846 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 1847 } 1848 1849 /* Delay if neither beginning nor end and there was a change. */ 1850 if ((statesnew || statesold) && *readstate && newstate) 1851 cur_ops->read_delay(trsp, rtrsp); 1852 1853 /* Update the reader state. */ 1854 if (idxnew1 == -1) 1855 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 1856 WARN_ON_ONCE(idxnew1 < 0); 1857 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1)) 1858 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1); 1859 if (idxnew2 == -1) 1860 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 1861 WARN_ON_ONCE(idxnew2 < 0); 1862 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1863 *readstate = idxnew1 | idxnew2 | newstate; 1864 WARN_ON_ONCE(*readstate < 0); 1865 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1)) 1866 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2); 1867 } 1868 1869 /* Return the biggest extendables mask given current RCU and boot parameters. */ 1870 static int rcutorture_extend_mask_max(void) 1871 { 1872 int mask; 1873 1874 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1875 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1876 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 1877 return mask; 1878 } 1879 1880 /* Return a random protection state mask, but with at least one bit set.
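 * Should the random selection come up empty, fall back to an ordinary RCU reader (RCUTORTURE_RDR_RCU_1).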
*/ 1881 static int 1882 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1883 { 1884 int mask = rcutorture_extend_mask_max(); 1885 unsigned long randmask1 = torture_random(trsp); 1886 unsigned long randmask2 = randmask1 >> 3; 1887 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 1888 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 1889 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1890 1891 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); 1892 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1893 if (!(randmask1 & 0x7)) 1894 mask = mask & randmask2; 1895 else 1896 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1897 1898 // Can't have nested RCU reader without outer RCU reader. 1899 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 1900 if (oldmask & RCUTORTURE_RDR_RCU_1) 1901 mask &= ~RCUTORTURE_RDR_RCU_2; 1902 else 1903 mask |= RCUTORTURE_RDR_RCU_1; 1904 } 1905 1906 /* 1907 * Can't enable bh w/irq disabled. 1908 */ 1909 if (mask & RCUTORTURE_RDR_IRQ) 1910 mask |= oldmask & bhs; 1911 1912 /* 1913 * Ideally these sequences would be detected in debug builds 1914 * (regardless of RT), but until then don't stop testing 1915 * them on non-RT. 1916 */ 1917 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1918 /* Can't modify BH in atomic context */ 1919 if (oldmask & preempts_irq) 1920 mask &= ~bhs; 1921 if ((oldmask | mask) & preempts_irq) 1922 mask |= oldmask & bhs; 1923 } 1924 1925 return mask ?: RCUTORTURE_RDR_RCU_1; 1926 } 1927 1928 /* 1929 * Do a randomly selected number of extensions of an existing RCU read-side 1930 * critical section. 1931 */ 1932 static struct rt_read_seg * 1933 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1934 struct rt_read_seg *rtrsp) 1935 { 1936 int i; 1937 int j; 1938 int mask = rcutorture_extend_mask_max(); 1939 1940 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1941 if (!((mask - 1) & mask)) 1942 return rtrsp; /* Current RCU reader not extendable. */ 1943 /* Bias towards larger numbers of loops. */ 1944 i = torture_random(trsp); 1945 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1946 for (j = 0; j < i; j++) { 1947 mask = rcutorture_extend_mask(*readstate, trsp); 1948 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1949 } 1950 return &rtrsp[j]; 1951 } 1952 1953 /* 1954 * Do one read-side critical section, returning false if there was 1955 * no data to read. Can be invoked both from process context and 1956 * from a timer handler. 
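 * A negative myid indicates a timer-handler caller, for which rcu_torture_reader_do_mbchk() skips its memory-ordering checks.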
1957 */ 1958 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1959 { 1960 bool checkpolling = !(torture_random(trsp) & 0xfff); 1961 unsigned long cookie; 1962 struct rcu_gp_oldstate cookie_full; 1963 int i; 1964 unsigned long started; 1965 unsigned long completed; 1966 int newstate; 1967 struct rcu_torture *p; 1968 int pipe_count; 1969 int readstate = 0; 1970 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1971 struct rt_read_seg *rtrsp = &rtseg[0]; 1972 struct rt_read_seg *rtrsp1; 1973 unsigned long long ts; 1974 1975 WARN_ON_ONCE(!rcu_is_watching()); 1976 newstate = rcutorture_extend_mask(readstate, trsp); 1977 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1978 if (checkpolling) { 1979 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1980 cookie = cur_ops->get_gp_state(); 1981 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 1982 cur_ops->get_gp_state_full(&cookie_full); 1983 } 1984 started = cur_ops->get_gp_seq(); 1985 ts = rcu_trace_clock_local(); 1986 p = rcu_dereference_check(rcu_torture_current, 1987 !cur_ops->readlock_held || cur_ops->readlock_held()); 1988 if (p == NULL) { 1989 /* Wait for rcu_torture_writer to get underway */ 1990 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1991 return false; 1992 } 1993 if (p->rtort_mbtest == 0) 1994 atomic_inc(&n_rcu_torture_mberror); 1995 rcu_torture_reader_do_mbchk(myid, p, trsp); 1996 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1997 preempt_disable(); 1998 pipe_count = READ_ONCE(p->rtort_pipe_count); 1999 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2000 /* Should not happen, but... */ 2001 pipe_count = RCU_TORTURE_PIPE_LEN; 2002 } 2003 completed = cur_ops->get_gp_seq(); 2004 if (pipe_count > 1) { 2005 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 2006 ts, started, completed); 2007 rcu_ftrace_dump(DUMP_ALL); 2008 } 2009 __this_cpu_inc(rcu_torture_count[pipe_count]); 2010 completed = rcutorture_seq_diff(completed, started); 2011 if (completed > RCU_TORTURE_PIPE_LEN) { 2012 /* Should not happen, but... */ 2013 completed = RCU_TORTURE_PIPE_LEN; 2014 } 2015 __this_cpu_inc(rcu_torture_batch[completed]); 2016 preempt_enable(); 2017 if (checkpolling) { 2018 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2019 WARN_ONCE(cur_ops->poll_gp_state(cookie), 2020 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2021 __func__, 2022 rcu_torture_writer_state_getname(), 2023 rcu_torture_writer_state, 2024 cookie, cur_ops->get_gp_state()); 2025 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2026 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 2027 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2028 __func__, 2029 rcu_torture_writer_state_getname(), 2030 rcu_torture_writer_state, 2031 cpumask_pr_args(cpu_online_mask)); 2032 } 2033 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 2034 WARN_ON_ONCE(readstate); 2035 // This next splat is expected behavior if leakpointer, especially 2036 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2037 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 2038 2039 /* If error or close call, record the sequence of reader protections. 
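 * Only the first such event is recorded (hence the xchg() of err_segs_recorded), and the captured segments are printed at cleanup time.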
*/ 2040 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2041 i = 0; 2042 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 2043 err_segs[i++] = *rtrsp1; 2044 rt_read_nsegs = i; 2045 } 2046 2047 return true; 2048 } 2049 2050 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2051 2052 /* 2053 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2054 * incrementing the corresponding element of the pipeline array. The 2055 * counter in the element should never be greater than 1, otherwise, the 2056 * RCU implementation is broken. 2057 */ 2058 static void rcu_torture_timer(struct timer_list *unused) 2059 { 2060 atomic_long_inc(&n_rcu_torture_timers); 2061 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2062 2063 /* Test call_rcu() invocation from interrupt handler. */ 2064 if (cur_ops->call) { 2065 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2066 2067 if (rhp) 2068 cur_ops->call(rhp, rcu_torture_timer_cb); 2069 } 2070 } 2071 2072 /* 2073 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2074 * incrementing the corresponding element of the pipeline array. The 2075 * counter in the element should never be greater than 1, otherwise, the 2076 * RCU implementation is broken. 2077 */ 2078 static int 2079 rcu_torture_reader(void *arg) 2080 { 2081 unsigned long lastsleep = jiffies; 2082 long myid = (long)arg; 2083 int mynumonline = myid; 2084 DEFINE_TORTURE_RANDOM(rand); 2085 struct timer_list t; 2086 2087 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2088 set_user_nice(current, MAX_NICE); 2089 if (irqreader && cur_ops->irq_capable) 2090 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2091 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2092 do { 2093 if (irqreader && cur_ops->irq_capable) { 2094 if (!timer_pending(&t)) 2095 mod_timer(&t, jiffies + 1); 2096 } 2097 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2098 schedule_timeout_interruptible(HZ); 2099 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2100 torture_hrtimeout_us(500, 1000, &rand); 2101 lastsleep = jiffies + 10; 2102 } 2103 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2104 schedule_timeout_interruptible(HZ / 5); 2105 stutter_wait("rcu_torture_reader"); 2106 } while (!torture_must_stop()); 2107 if (irqreader && cur_ops->irq_capable) { 2108 del_timer_sync(&t); 2109 destroy_timer_on_stack(&t); 2110 } 2111 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2112 torture_kthread_stopping("rcu_torture_reader"); 2113 return 0; 2114 } 2115 2116 /* 2117 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2118 * increase race probabilities and fuzzes the interval between toggling. 
2119 */ 2120 static int rcu_nocb_toggle(void *arg) 2121 { 2122 int cpu; 2123 int maxcpu = -1; 2124 int oldnice = task_nice(current); 2125 long r; 2126 DEFINE_TORTURE_RANDOM(rand); 2127 ktime_t toggle_delay; 2128 unsigned long toggle_fuzz; 2129 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2130 2131 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2132 while (!rcu_inkernel_boot_has_ended()) 2133 schedule_timeout_interruptible(HZ / 10); 2134 for_each_possible_cpu(cpu) 2135 maxcpu = cpu; 2136 WARN_ON(maxcpu < 0); 2137 if (toggle_interval > ULONG_MAX) 2138 toggle_fuzz = ULONG_MAX >> 3; 2139 else 2140 toggle_fuzz = toggle_interval >> 3; 2141 if (toggle_fuzz <= 0) 2142 toggle_fuzz = NSEC_PER_USEC; 2143 do { 2144 r = torture_random(&rand); 2145 cpu = (r >> 1) % (maxcpu + 1); 2146 if (r & 0x1) { 2147 rcu_nocb_cpu_offload(cpu); 2148 atomic_long_inc(&n_nocb_offload); 2149 } else { 2150 rcu_nocb_cpu_deoffload(cpu); 2151 atomic_long_inc(&n_nocb_deoffload); 2152 } 2153 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2154 set_current_state(TASK_INTERRUPTIBLE); 2155 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2156 if (stutter_wait("rcu_nocb_toggle")) 2157 sched_set_normal(current, oldnice); 2158 } while (!torture_must_stop()); 2159 torture_kthread_stopping("rcu_nocb_toggle"); 2160 return 0; 2161 } 2162 2163 /* 2164 * Print torture statistics. Caller must ensure that there is only 2165 * one call to this function at a given time!!! This is normally 2166 * accomplished by relying on the module system to only have one copy 2167 * of the module loaded, and then by giving the rcu_torture_stats 2168 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2169 * thread is not running). 2170 */ 2171 static void 2172 rcu_torture_stats_print(void) 2173 { 2174 int cpu; 2175 int i; 2176 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2177 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2178 struct rcu_torture *rtcp; 2179 static unsigned long rtcv_snap = ULONG_MAX; 2180 static bool splatted; 2181 struct task_struct *wtp; 2182 2183 for_each_possible_cpu(cpu) { 2184 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2185 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2186 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2187 } 2188 } 2189 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2190 if (pipesummary[i] != 0) 2191 break; 2192 } 2193 2194 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2195 rtcp = rcu_access_pointer(rcu_torture_current); 2196 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2197 rtcp, 2198 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2199 rcu_torture_current_version, 2200 list_empty(&rcu_torture_freelist), 2201 atomic_read(&n_rcu_torture_alloc), 2202 atomic_read(&n_rcu_torture_alloc_fail), 2203 atomic_read(&n_rcu_torture_free)); 2204 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2205 atomic_read(&n_rcu_torture_mberror), 2206 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2207 n_rcu_torture_barrier_error, 2208 n_rcu_torture_boost_ktrerror); 2209 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2210 n_rcu_torture_boost_failure, 2211 n_rcu_torture_boosts, 2212 atomic_long_read(&n_rcu_torture_timers)); 2213 torture_onoff_stats(); 2214 pr_cont("barrier: %ld/%ld:%ld ", 2215 data_race(n_barrier_successes), 2216 data_race(n_barrier_attempts), 2217 data_race(n_rcu_torture_barrier_error)); 2218 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2219 pr_cont("nocb-toggles: %ld:%ld\n", 2220 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2221 2222 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2223 if (atomic_read(&n_rcu_torture_mberror) || 2224 atomic_read(&n_rcu_torture_mbchk_fail) || 2225 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2226 n_rcu_torture_boost_failure || i > 1) { 2227 pr_cont("%s", "!!! "); 2228 atomic_inc(&n_rcu_torture_error); 2229 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2230 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2231 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2232 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2233 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2234 WARN_ON_ONCE(i > 1); // Too-short grace period 2235 } 2236 pr_cont("Reader Pipe: "); 2237 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2238 pr_cont(" %ld", pipesummary[i]); 2239 pr_cont("\n"); 2240 2241 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2242 pr_cont("Reader Batch: "); 2243 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2244 pr_cont(" %ld", batchsummary[i]); 2245 pr_cont("\n"); 2246 2247 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2248 pr_cont("Free-Block Circulation: "); 2249 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2250 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2251 } 2252 pr_cont("\n"); 2253 2254 if (cur_ops->stats) 2255 cur_ops->stats(); 2256 if (rtcv_snap == rcu_torture_current_version && 2257 rcu_access_pointer(rcu_torture_current) && 2258 !rcu_stall_is_suppressed()) { 2259 int __maybe_unused flags = 0; 2260 unsigned long __maybe_unused gp_seq = 0; 2261 2262 rcutorture_get_gp_data(cur_ops->ttype, 2263 &flags, &gp_seq); 2264 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 2265 &flags, &gp_seq); 2266 wtp = READ_ONCE(writer_task); 2267 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2268 rcu_torture_writer_state_getname(), 2269 rcu_torture_writer_state, gp_seq, flags, 2270 wtp == NULL ? ~0U : wtp->__state, 2271 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2272 if (!splatted && wtp) { 2273 sched_show_task(wtp); 2274 splatted = true; 2275 } 2276 if (cur_ops->gp_kthread_dbg) 2277 cur_ops->gp_kthread_dbg(); 2278 rcu_ftrace_dump(DUMP_ALL); 2279 } 2280 rtcv_snap = rcu_torture_current_version; 2281 } 2282 2283 /* 2284 * Periodically prints torture statistics, if periodic statistics printing 2285 * was specified via the stat_interval module parameter. 
2286 */ 2287 static int 2288 rcu_torture_stats(void *arg) 2289 { 2290 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2291 do { 2292 schedule_timeout_interruptible(stat_interval * HZ); 2293 rcu_torture_stats_print(); 2294 torture_shutdown_absorb("rcu_torture_stats"); 2295 } while (!torture_must_stop()); 2296 torture_kthread_stopping("rcu_torture_stats"); 2297 return 0; 2298 } 2299 2300 /* Test mem_dump_obj() and friends. */ 2301 static void rcu_torture_mem_dump_obj(void) 2302 { 2303 struct rcu_head *rhp; 2304 struct kmem_cache *kcp; 2305 static int z; 2306 2307 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2308 if (WARN_ON_ONCE(!kcp)) 2309 return; 2310 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2311 if (WARN_ON_ONCE(!rhp)) { 2312 kmem_cache_destroy(kcp); 2313 return; 2314 } 2315 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2316 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2317 mem_dump_obj(ZERO_SIZE_PTR); 2318 pr_alert("mem_dump_obj(NULL):"); 2319 mem_dump_obj(NULL); 2320 pr_alert("mem_dump_obj(%px):", &rhp); 2321 mem_dump_obj(&rhp); 2322 pr_alert("mem_dump_obj(%px):", rhp); 2323 mem_dump_obj(rhp); 2324 pr_alert("mem_dump_obj(%px):", &rhp->func); 2325 mem_dump_obj(&rhp->func); 2326 pr_alert("mem_dump_obj(%px):", &z); 2327 mem_dump_obj(&z); 2328 kmem_cache_free(kcp, rhp); 2329 kmem_cache_destroy(kcp); 2330 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2331 if (WARN_ON_ONCE(!rhp)) 2332 return; 2333 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2334 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2335 mem_dump_obj(rhp); 2336 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2337 mem_dump_obj(&rhp->func); 2338 kfree(rhp); 2339 rhp = vmalloc(4096); 2340 if (WARN_ON_ONCE(!rhp)) 2341 return; 2342 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2343 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2344 mem_dump_obj(rhp); 2345 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2346 mem_dump_obj(&rhp->func); 2347 vfree(rhp); 2348 } 2349 2350 static void 2351 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2352 { 2353 pr_alert("%s" TORTURE_FLAG 2354 "--- %s: nreaders=%d nfakewriters=%d " 2355 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2356 "shuffle_interval=%d stutter=%d irqreader=%d " 2357 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2358 "test_boost=%d/%d test_boost_interval=%d " 2359 "test_boost_duration=%d shutdown_secs=%d " 2360 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2361 "stall_cpu_block=%d " 2362 "n_barrier_cbs=%d " 2363 "onoff_interval=%d onoff_holdoff=%d " 2364 "read_exit_delay=%d read_exit_burst=%d " 2365 "nocbs_nthreads=%d nocbs_toggle=%d " 2366 "test_nmis=%d\n", 2367 torture_type, tag, nrealreaders, nfakewriters, 2368 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2369 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2370 test_boost, cur_ops->can_boost, 2371 test_boost_interval, test_boost_duration, shutdown_secs, 2372 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2373 stall_cpu_block, 2374 n_barrier_cbs, 2375 onoff_interval, onoff_holdoff, 2376 read_exit_delay, read_exit_burst, 2377 nocbs_nthreads, nocbs_toggle, 2378 test_nmis); 2379 } 2380 2381 static int rcutorture_booster_cleanup(unsigned int cpu) 2382 { 2383 struct task_struct *t; 2384 2385 if (boost_tasks[cpu] == NULL) 
2386 return 0; 2387 mutex_lock(&boost_mutex); 2388 t = boost_tasks[cpu]; 2389 boost_tasks[cpu] = NULL; 2390 rcu_torture_enable_rt_throttle(); 2391 mutex_unlock(&boost_mutex); 2392 2393 /* This must be outside of the mutex, otherwise deadlock! */ 2394 torture_stop_kthread(rcu_torture_boost, t); 2395 return 0; 2396 } 2397 2398 static int rcutorture_booster_init(unsigned int cpu) 2399 { 2400 int retval; 2401 2402 if (boost_tasks[cpu] != NULL) 2403 return 0; /* Already created, nothing more to do. */ 2404 2405 // Testing RCU priority boosting requires rcutorture do 2406 // some serious abuse. Counter this by running ksoftirqd 2407 // at higher priority. 2408 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2409 struct sched_param sp; 2410 struct task_struct *t; 2411 2412 t = per_cpu(ksoftirqd, cpu); 2413 WARN_ON_ONCE(!t); 2414 sp.sched_priority = 2; 2415 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2416 } 2417 2418 /* Don't allow time recalculation while creating a new task. */ 2419 mutex_lock(&boost_mutex); 2420 rcu_torture_disable_rt_throttle(); 2421 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2422 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2423 cpu, "rcu_torture_boost_%u"); 2424 if (IS_ERR(boost_tasks[cpu])) { 2425 retval = PTR_ERR(boost_tasks[cpu]); 2426 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2427 n_rcu_torture_boost_ktrerror++; 2428 boost_tasks[cpu] = NULL; 2429 mutex_unlock(&boost_mutex); 2430 return retval; 2431 } 2432 mutex_unlock(&boost_mutex); 2433 return 0; 2434 } 2435 2436 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 2437 { 2438 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 2439 return NOTIFY_OK; 2440 } 2441 2442 static struct notifier_block rcu_torture_stall_block = { 2443 .notifier_call = rcu_torture_stall_nf, 2444 }; 2445 2446 /* 2447 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2448 * induces a CPU stall for the time specified by stall_cpu. If a new 2449 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 2450 */ 2451 static int rcu_torture_stall(void *args) 2452 { 2453 int idx; 2454 int ret; 2455 unsigned long stop_at; 2456 2457 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2458 if (rcu_cpu_stall_notifiers) { 2459 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 2460 if (ret) 2461 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 2462 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 2463 } 2464 if (stall_cpu_holdoff > 0) { 2465 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2466 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2467 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2468 } 2469 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2470 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2471 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2472 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2473 if (kthread_should_stop()) 2474 break; 2475 schedule_timeout_uninterruptible(HZ); 2476 } 2477 } 2478 if (!kthread_should_stop() && stall_cpu > 0) { 2479 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2480 stop_at = ktime_get_seconds() + stall_cpu; 2481 /* RCU CPU stall is expected behavior in following code. 
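 * The read-side critical section below is held across the entire stall so that the expected RCU CPU stall warnings are actually provoked.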
*/ 2482 idx = cur_ops->readlock(); 2483 if (stall_cpu_irqsoff) 2484 local_irq_disable(); 2485 else if (!stall_cpu_block) 2486 preempt_disable(); 2487 pr_alert("%s start on CPU %d.\n", 2488 __func__, raw_smp_processor_id()); 2489 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 2490 stop_at)) 2491 if (stall_cpu_block) { 2492 #ifdef CONFIG_PREEMPTION 2493 preempt_schedule(); 2494 #else 2495 schedule_timeout_uninterruptible(HZ); 2496 #endif 2497 } else if (stall_no_softlockup) { 2498 touch_softlockup_watchdog(); 2499 } 2500 if (stall_cpu_irqsoff) 2501 local_irq_enable(); 2502 else if (!stall_cpu_block) 2503 preempt_enable(); 2504 cur_ops->readunlock(idx); 2505 } 2506 pr_alert("%s end.\n", __func__); 2507 if (rcu_cpu_stall_notifiers && !ret) { 2508 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 2509 if (ret) 2510 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 2511 } 2512 torture_shutdown_absorb("rcu_torture_stall"); 2513 while (!kthread_should_stop()) 2514 schedule_timeout_interruptible(10 * HZ); 2515 return 0; 2516 } 2517 2518 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2519 static int __init rcu_torture_stall_init(void) 2520 { 2521 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2522 return 0; 2523 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2524 } 2525 2526 /* State structure for forward-progress self-propagating RCU callback. */ 2527 struct fwd_cb_state { 2528 struct rcu_head rh; 2529 int stop; 2530 }; 2531 2532 /* 2533 * Forward-progress self-propagating RCU callback function. Because 2534 * callbacks run from softirq, this function is an implicit RCU read-side 2535 * critical section. 2536 */ 2537 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2538 { 2539 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2540 2541 if (READ_ONCE(fcsp->stop)) { 2542 WRITE_ONCE(fcsp->stop, 2); 2543 return; 2544 } 2545 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2546 } 2547 2548 /* State for continuous-flood RCU callbacks. */ 2549 struct rcu_fwd_cb { 2550 struct rcu_head rh; 2551 struct rcu_fwd_cb *rfc_next; 2552 struct rcu_fwd *rfc_rfp; 2553 int rfc_gps; 2554 }; 2555 2556 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2557 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2558 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2559 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 2560 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2561 2562 struct rcu_launder_hist { 2563 long n_launders; 2564 unsigned long launder_gp_seq; 2565 }; 2566 2567 struct rcu_fwd { 2568 spinlock_t rcu_fwd_lock; 2569 struct rcu_fwd_cb *rcu_fwd_cb_head; 2570 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2571 long n_launders_cb; 2572 unsigned long rcu_fwd_startat; 2573 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2574 unsigned long rcu_launder_gp_seq_start; 2575 int rcu_fwd_id; 2576 }; 2577 2578 static DEFINE_MUTEX(rcu_fwd_mutex); 2579 static struct rcu_fwd *rcu_fwds; 2580 static unsigned long rcu_fwd_seq; 2581 static atomic_long_t rcu_fwd_max_cbs; 2582 static bool rcu_fwd_emergency_stop; 2583 2584 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2585 { 2586 unsigned long gps; 2587 unsigned long gps_old; 2588 int i; 2589 int j; 2590 2591 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2592 if (rfp->n_launders_hist[i].n_launders > 0) 2593 break; 2594 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2595 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2596 gps_old = rfp->rcu_launder_gp_seq_start; 2597 for (j = 0; j <= i; j++) { 2598 gps = rfp->n_launders_hist[j].launder_gp_seq; 2599 pr_cont(" %ds/%d: %ld:%ld", 2600 j + 1, FWD_CBS_HIST_DIV, 2601 rfp->n_launders_hist[j].n_launders, 2602 rcutorture_seq_diff(gps, gps_old)); 2603 gps_old = gps; 2604 } 2605 pr_cont("\n"); 2606 } 2607 2608 /* Callback function for continuous-flood RCU callbacks. */ 2609 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2610 { 2611 unsigned long flags; 2612 int i; 2613 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2614 struct rcu_fwd_cb **rfcpp; 2615 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2616 2617 rfcp->rfc_next = NULL; 2618 rfcp->rfc_gps++; 2619 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2620 rfcpp = rfp->rcu_fwd_cb_tail; 2621 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2622 WRITE_ONCE(*rfcpp, rfcp); 2623 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2624 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2625 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2626 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2627 rfp->n_launders_hist[i].n_launders++; 2628 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2629 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2630 } 2631 2632 // Give the scheduler a chance, even on nohz_full CPUs. 2633 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2634 { 2635 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2636 // Real call_rcu() floods hit userspace, so emulate that. 2637 if (need_resched() || (iter & 0xfff)) 2638 schedule(); 2639 return; 2640 } 2641 // No userspace emulation: CB invocation throttles call_rcu() 2642 cond_resched(); 2643 } 2644 2645 /* 2646 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2647 * test is over or because we hit an OOM event. 
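 * Returns the number of callbacks freed.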
2648 */ 2649 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2650 { 2651 unsigned long flags; 2652 unsigned long freed = 0; 2653 struct rcu_fwd_cb *rfcp; 2654 2655 for (;;) { 2656 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2657 rfcp = rfp->rcu_fwd_cb_head; 2658 if (!rfcp) { 2659 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2660 break; 2661 } 2662 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2663 if (!rfp->rcu_fwd_cb_head) 2664 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2665 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2666 kfree(rfcp); 2667 freed++; 2668 rcu_torture_fwd_prog_cond_resched(freed); 2669 if (tick_nohz_full_enabled()) { 2670 local_irq_save(flags); 2671 rcu_momentary_dyntick_idle(); 2672 local_irq_restore(flags); 2673 } 2674 } 2675 return freed; 2676 } 2677 2678 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2679 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2680 int *tested, int *tested_tries) 2681 { 2682 unsigned long cver; 2683 unsigned long dur; 2684 struct fwd_cb_state fcs; 2685 unsigned long gps; 2686 int idx; 2687 int sd; 2688 int sd4; 2689 bool selfpropcb = false; 2690 unsigned long stopat; 2691 static DEFINE_TORTURE_RANDOM(trs); 2692 2693 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2694 if (!cur_ops->sync) 2695 return; // Cannot do need_resched() forward progress testing without ->sync. 2696 if (cur_ops->call && cur_ops->cb_barrier) { 2697 init_rcu_head_on_stack(&fcs.rh); 2698 selfpropcb = true; 2699 } 2700 2701 /* Tight loop containing cond_resched(). */ 2702 atomic_inc(&rcu_fwd_cb_nodelay); 2703 cur_ops->sync(); /* Later readers see above write. */ 2704 if (selfpropcb) { 2705 WRITE_ONCE(fcs.stop, 0); 2706 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2707 } 2708 cver = READ_ONCE(rcu_torture_current_version); 2709 gps = cur_ops->get_gp_seq(); 2710 sd = cur_ops->stall_dur() + 1; 2711 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2712 dur = sd4 + torture_random(&trs) % (sd - sd4); 2713 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2714 stopat = rfp->rcu_fwd_startat + dur; 2715 while (time_before(jiffies, stopat) && 2716 !shutdown_time_arrived() && 2717 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2718 idx = cur_ops->readlock(); 2719 udelay(10); 2720 cur_ops->readunlock(idx); 2721 if (!fwd_progress_need_resched || need_resched()) 2722 cond_resched(); 2723 } 2724 (*tested_tries)++; 2725 if (!time_before(jiffies, stopat) && 2726 !shutdown_time_arrived() && 2727 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2728 (*tested)++; 2729 cver = READ_ONCE(rcu_torture_current_version) - cver; 2730 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2731 WARN_ON(!cver && gps < 2); 2732 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2733 rfp->rcu_fwd_id, dur, cver, gps); 2734 } 2735 if (selfpropcb) { 2736 WRITE_ONCE(fcs.stop, 1); 2737 cur_ops->sync(); /* Wait for running CB to complete. */ 2738 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2739 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2740 } 2741 2742 if (selfpropcb) { 2743 WARN_ON(READ_ONCE(fcs.stop) != 2); 2744 destroy_rcu_head_on_stack(&fcs.rh); 2745 } 2746 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2747 atomic_dec(&rcu_fwd_cb_nodelay); 2748 } 2749 2750 /* Carry out call_rcu() forward-progress testing. 
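 * A "launder" is the re-posting of a callback that has already been invoked at least once; the test warns unless at least MIN_FWD_CBS_LAUNDERED callbacks, each laundered MIN_FWD_CB_LAUNDERS times, are seen within MAX_FWD_CB_JIFFIES.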
*/ 2751 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2752 { 2753 unsigned long cver; 2754 unsigned long flags; 2755 unsigned long gps; 2756 int i; 2757 long n_launders; 2758 long n_launders_cb_snap; 2759 long n_launders_sa; 2760 long n_max_cbs; 2761 long n_max_gps; 2762 struct rcu_fwd_cb *rfcp; 2763 struct rcu_fwd_cb *rfcpn; 2764 unsigned long stopat; 2765 unsigned long stoppedat; 2766 2767 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2768 if (READ_ONCE(rcu_fwd_emergency_stop)) 2769 return; /* Get out of the way quickly, no GP wait! */ 2770 if (!cur_ops->call) 2771 return; /* Can't do call_rcu() fwd prog without ->call. */ 2772 2773 /* Loop continuously posting RCU callbacks. */ 2774 atomic_inc(&rcu_fwd_cb_nodelay); 2775 cur_ops->sync(); /* Later readers see above write. */ 2776 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2777 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2778 n_launders = 0; 2779 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2780 n_launders_sa = 0; 2781 n_max_cbs = 0; 2782 n_max_gps = 0; 2783 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2784 rfp->n_launders_hist[i].n_launders = 0; 2785 cver = READ_ONCE(rcu_torture_current_version); 2786 gps = cur_ops->get_gp_seq(); 2787 rfp->rcu_launder_gp_seq_start = gps; 2788 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2789 while (time_before(jiffies, stopat) && 2790 !shutdown_time_arrived() && 2791 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2792 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2793 rfcpn = NULL; 2794 if (rfcp) 2795 rfcpn = READ_ONCE(rfcp->rfc_next); 2796 if (rfcpn) { 2797 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2798 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2799 break; 2800 rfp->rcu_fwd_cb_head = rfcpn; 2801 n_launders++; 2802 n_launders_sa++; 2803 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2804 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2805 if (WARN_ON_ONCE(!rfcp)) { 2806 schedule_timeout_interruptible(1); 2807 continue; 2808 } 2809 n_max_cbs++; 2810 n_launders_sa = 0; 2811 rfcp->rfc_gps = 0; 2812 rfcp->rfc_rfp = rfp; 2813 } else { 2814 rfcp = NULL; 2815 } 2816 if (rfcp) 2817 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2818 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2819 if (tick_nohz_full_enabled()) { 2820 local_irq_save(flags); 2821 rcu_momentary_dyntick_idle(); 2822 local_irq_restore(flags); 2823 } 2824 } 2825 stoppedat = jiffies; 2826 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2827 cver = READ_ONCE(rcu_torture_current_version) - cver; 2828 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2829 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2830 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2831 (void)rcu_torture_fwd_prog_cbfree(rfp); 2832 2833 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2834 !shutdown_time_arrived()) { 2835 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2836 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2837 __func__, 2838 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2839 n_launders + n_max_cbs - n_launders_cb_snap, 2840 n_launders, n_launders_sa, 2841 n_max_gps, n_max_cbs, cver, gps); 2842 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2843 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 
2844 rcu_torture_fwd_cb_hist(rfp); 2845 mutex_unlock(&rcu_fwd_mutex); 2846 } 2847 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2848 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2849 atomic_dec(&rcu_fwd_cb_nodelay); 2850 } 2851 2852 2853 /* 2854 * OOM notifier, but this only prints diagnostic information for the 2855 * current forward-progress test. 2856 */ 2857 static int rcutorture_oom_notify(struct notifier_block *self, 2858 unsigned long notused, void *nfreed) 2859 { 2860 int i; 2861 long ncbs; 2862 struct rcu_fwd *rfp; 2863 2864 mutex_lock(&rcu_fwd_mutex); 2865 rfp = rcu_fwds; 2866 if (!rfp) { 2867 mutex_unlock(&rcu_fwd_mutex); 2868 return NOTIFY_OK; 2869 } 2870 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2871 __func__); 2872 for (i = 0; i < fwd_progress; i++) { 2873 rcu_torture_fwd_cb_hist(&rfp[i]); 2874 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2875 } 2876 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2877 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2878 ncbs = 0; 2879 for (i = 0; i < fwd_progress; i++) 2880 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2881 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2882 cur_ops->cb_barrier(); 2883 ncbs = 0; 2884 for (i = 0; i < fwd_progress; i++) 2885 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2886 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2887 cur_ops->cb_barrier(); 2888 ncbs = 0; 2889 for (i = 0; i < fwd_progress; i++) 2890 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2891 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2892 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2893 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2894 pr_info("%s returning after OOM processing.\n", __func__); 2895 mutex_unlock(&rcu_fwd_mutex); 2896 return NOTIFY_OK; 2897 } 2898 2899 static struct notifier_block rcutorture_oom_nb = { 2900 .notifier_call = rcutorture_oom_notify 2901 }; 2902 2903 /* Carry out grace-period forward-progress testing. 
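 * The kthread with rcu_fwd_id zero paces the others: it clears rcu_fwd_emergency_stop and advances rcu_fwd_seq each pass, while the remaining kthreads wait for rcu_fwd_seq to change.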
*/ 2904 static int rcu_torture_fwd_prog(void *args) 2905 { 2906 bool firsttime = true; 2907 long max_cbs; 2908 int oldnice = task_nice(current); 2909 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2910 struct rcu_fwd *rfp = args; 2911 int tested = 0; 2912 int tested_tries = 0; 2913 2914 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2915 rcu_bind_current_to_nocb(); 2916 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2917 set_user_nice(current, MAX_NICE); 2918 do { 2919 if (!rfp->rcu_fwd_id) { 2920 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2921 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2922 if (!firsttime) { 2923 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2924 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2925 } 2926 firsttime = false; 2927 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2928 } else { 2929 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2930 schedule_timeout_interruptible(HZ / 20); 2931 oldseq = READ_ONCE(rcu_fwd_seq); 2932 } 2933 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2934 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2935 rcu_torture_fwd_prog_cr(rfp); 2936 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2937 (!IS_ENABLED(CONFIG_TINY_RCU) || 2938 (rcu_inkernel_boot_has_ended() && 2939 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2940 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2941 2942 /* Avoid slow periods, better to test when busy. */ 2943 if (stutter_wait("rcu_torture_fwd_prog")) 2944 sched_set_normal(current, oldnice); 2945 } while (!torture_must_stop()); 2946 /* Short runs might not contain a valid forward-progress attempt. */ 2947 if (!rfp->rcu_fwd_id) { 2948 WARN_ON(!tested && tested_tries >= 5); 2949 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2950 } 2951 torture_kthread_stopping("rcu_torture_fwd_prog"); 2952 return 0; 2953 } 2954 2955 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2956 static int __init rcu_torture_fwd_prog_init(void) 2957 { 2958 int i; 2959 int ret = 0; 2960 struct rcu_fwd *rfp; 2961 2962 if (!fwd_progress) 2963 return 0; /* Not requested, so don't do it. */ 2964 if (fwd_progress >= nr_cpu_ids) { 2965 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 2966 fwd_progress = nr_cpu_ids; 2967 } else if (fwd_progress < 0) { 2968 fwd_progress = nr_cpu_ids; 2969 } 2970 if ((!cur_ops->sync && !cur_ops->call) || 2971 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2972 cur_ops == &rcu_busted_ops) { 2973 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2974 fwd_progress = 0; 2975 return 0; 2976 } 2977 if (stall_cpu > 0) { 2978 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2979 fwd_progress = 0; 2980 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2981 return -EINVAL; /* In module, can fail back to user. */ 2982 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 2983 return 0; 2984 } 2985 if (fwd_progress_holdoff <= 0) 2986 fwd_progress_holdoff = 1; 2987 if (fwd_progress_div <= 0) 2988 fwd_progress_div = 4; 2989 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 2990 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 2991 if (!rfp || !fwd_prog_tasks) { 2992 kfree(rfp); 2993 kfree(fwd_prog_tasks); 2994 fwd_prog_tasks = NULL; 2995 fwd_progress = 0; 2996 return -ENOMEM; 2997 } 2998 for (i = 0; i < fwd_progress; i++) { 2999 spin_lock_init(&rfp[i].rcu_fwd_lock); 3000 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3001 rfp[i].rcu_fwd_id = i; 3002 } 3003 mutex_lock(&rcu_fwd_mutex); 3004 rcu_fwds = rfp; 3005 mutex_unlock(&rcu_fwd_mutex); 3006 register_oom_notifier(&rcutorture_oom_nb); 3007 for (i = 0; i < fwd_progress; i++) { 3008 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3009 if (ret) { 3010 fwd_progress = i; 3011 return ret; 3012 } 3013 } 3014 return 0; 3015 } 3016 3017 static void rcu_torture_fwd_prog_cleanup(void) 3018 { 3019 int i; 3020 struct rcu_fwd *rfp; 3021 3022 if (!rcu_fwds || !fwd_prog_tasks) 3023 return; 3024 for (i = 0; i < fwd_progress; i++) 3025 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3026 unregister_oom_notifier(&rcutorture_oom_nb); 3027 mutex_lock(&rcu_fwd_mutex); 3028 rfp = rcu_fwds; 3029 rcu_fwds = NULL; 3030 mutex_unlock(&rcu_fwd_mutex); 3031 kfree(rfp); 3032 kfree(fwd_prog_tasks); 3033 fwd_prog_tasks = NULL; 3034 } 3035 3036 /* Callback function for RCU barrier testing. */ 3037 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3038 { 3039 atomic_inc(&barrier_cbs_invoked); 3040 } 3041 3042 /* IPI handler to get callback posted on desired CPU, if online. */ 3043 static void rcu_torture_barrier1cb(void *rcu_void) 3044 { 3045 struct rcu_head *rhp = rcu_void; 3046 3047 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3048 } 3049 3050 /* kthread function to register callbacks used to test RCU barriers. */ 3051 static int rcu_torture_barrier_cbs(void *arg) 3052 { 3053 long myid = (long)arg; 3054 bool lastphase = false; 3055 bool newphase; 3056 struct rcu_head rcu; 3057 3058 init_rcu_head_on_stack(&rcu); 3059 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3060 set_user_nice(current, MAX_NICE); 3061 do { 3062 wait_event(barrier_cbs_wq[myid], 3063 (newphase = 3064 smp_load_acquire(&barrier_phase)) != lastphase || 3065 torture_must_stop()); 3066 lastphase = newphase; 3067 if (torture_must_stop()) 3068 break; 3069 /* 3070 * The above smp_load_acquire() ensures barrier_phase load 3071 * is ordered before the following ->call(). 3072 */ 3073 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 3074 &rcu, 1)) { 3075 // IPI failed, so use direct call from current CPU. 3076 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3077 } 3078 if (atomic_dec_and_test(&barrier_cbs_count)) 3079 wake_up(&barrier_wq); 3080 } while (!torture_must_stop()); 3081 if (cur_ops->cb_barrier != NULL) 3082 cur_ops->cb_barrier(); 3083 destroy_rcu_head_on_stack(&rcu); 3084 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3085 return 0; 3086 } 3087 3088 /* kthread function to drive and coordinate RCU barrier testing. */ 3089 static int rcu_torture_barrier(void *arg) 3090 { 3091 int i; 3092 3093 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3094 do { 3095 atomic_set(&barrier_cbs_invoked, 0); 3096 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3097 /* Ensure barrier_phase ordered after prior assignments. 
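 * The smp_store_release() of barrier_phase below pairs with the smp_load_acquire() in rcu_torture_barrier_cbs().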
*/ 3098 smp_store_release(&barrier_phase, !barrier_phase); 3099 for (i = 0; i < n_barrier_cbs; i++) 3100 wake_up(&barrier_cbs_wq[i]); 3101 wait_event(barrier_wq, 3102 atomic_read(&barrier_cbs_count) == 0 || 3103 torture_must_stop()); 3104 if (torture_must_stop()) 3105 break; 3106 n_barrier_attempts++; 3107 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3108 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3109 n_rcu_torture_barrier_error++; 3110 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3111 atomic_read(&barrier_cbs_invoked), 3112 n_barrier_cbs); 3113 WARN_ON(1); 3114 // Wait manually for the remaining callbacks 3115 i = 0; 3116 do { 3117 if (WARN_ON(i++ > HZ)) 3118 i = INT_MIN; 3119 schedule_timeout_interruptible(1); 3120 cur_ops->cb_barrier(); 3121 } while (atomic_read(&barrier_cbs_invoked) != 3122 n_barrier_cbs && 3123 !torture_must_stop()); 3124 smp_mb(); // Can't trust ordering if broken. 3125 if (!torture_must_stop()) 3126 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3127 atomic_read(&barrier_cbs_invoked)); 3128 } else { 3129 n_barrier_successes++; 3130 } 3131 schedule_timeout_interruptible(HZ / 10); 3132 } while (!torture_must_stop()); 3133 torture_kthread_stopping("rcu_torture_barrier"); 3134 return 0; 3135 } 3136 3137 /* Initialize RCU barrier testing. */ 3138 static int rcu_torture_barrier_init(void) 3139 { 3140 int i; 3141 int ret; 3142 3143 if (n_barrier_cbs <= 0) 3144 return 0; 3145 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3146 pr_alert("%s" TORTURE_FLAG 3147 " Call or barrier ops missing for %s,\n", 3148 torture_type, cur_ops->name); 3149 pr_alert("%s" TORTURE_FLAG 3150 " RCU barrier testing omitted from run.\n", 3151 torture_type); 3152 return 0; 3153 } 3154 atomic_set(&barrier_cbs_count, 0); 3155 atomic_set(&barrier_cbs_invoked, 0); 3156 barrier_cbs_tasks = 3157 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3158 GFP_KERNEL); 3159 barrier_cbs_wq = 3160 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3161 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3162 return -ENOMEM; 3163 for (i = 0; i < n_barrier_cbs; i++) { 3164 init_waitqueue_head(&barrier_cbs_wq[i]); 3165 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3166 (void *)(long)i, 3167 barrier_cbs_tasks[i]); 3168 if (ret) 3169 return ret; 3170 } 3171 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3172 } 3173 3174 /* Clean up after RCU barrier testing. */ 3175 static void rcu_torture_barrier_cleanup(void) 3176 { 3177 int i; 3178 3179 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3180 if (barrier_cbs_tasks != NULL) { 3181 for (i = 0; i < n_barrier_cbs; i++) 3182 torture_stop_kthread(rcu_torture_barrier_cbs, 3183 barrier_cbs_tasks[i]); 3184 kfree(barrier_cbs_tasks); 3185 barrier_cbs_tasks = NULL; 3186 } 3187 if (barrier_cbs_wq != NULL) { 3188 kfree(barrier_cbs_wq); 3189 barrier_cbs_wq = NULL; 3190 } 3191 } 3192 3193 static bool rcu_torture_can_boost(void) 3194 { 3195 static int boost_warn_once; 3196 int prio; 3197 3198 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3199 return false; 3200 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3201 return false; 3202 3203 prio = rcu_get_gp_kthreads_prio(); 3204 if (!prio) 3205 return false; 3206 3207 if (prio < 2) { 3208 if (boost_warn_once == 1) 3209 return false; 3210 3211 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3212 boost_warn_once = 1; 3213 return false; 3214 } 3215 3216 return true; 3217 } 3218 3219 static bool read_exit_child_stop; 3220 static bool read_exit_child_stopped; 3221 static wait_queue_head_t read_exit_wq; 3222 3223 // Child kthread which just does an rcutorture reader and exits. 3224 static int rcu_torture_read_exit_child(void *trsp_in) 3225 { 3226 struct torture_random_state *trsp = trsp_in; 3227 3228 set_user_nice(current, MAX_NICE); 3229 // Minimize time between reading and exiting. 3230 while (!kthread_should_stop()) 3231 schedule_timeout_uninterruptible(HZ / 20); 3232 (void)rcu_torture_one_read(trsp, -1); 3233 return 0; 3234 } 3235 3236 // Parent kthread which creates and destroys read-exit child kthreads. 3237 static int rcu_torture_read_exit(void *unused) 3238 { 3239 bool errexit = false; 3240 int i; 3241 struct task_struct *tsp; 3242 DEFINE_TORTURE_RANDOM(trs); 3243 3244 // Allocate and initialize. 3245 set_user_nice(current, MAX_NICE); 3246 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3247 3248 // Each pass through this loop does one read-exit episode. 3249 do { 3250 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3251 for (i = 0; i < read_exit_burst; i++) { 3252 if (READ_ONCE(read_exit_child_stop)) 3253 break; 3254 stutter_wait("rcu_torture_read_exit"); 3255 // Spawn child. 3256 tsp = kthread_run(rcu_torture_read_exit_child, 3257 &trs, "%s", "rcu_torture_read_exit_child"); 3258 if (IS_ERR(tsp)) { 3259 TOROUT_ERRSTRING("out of memory"); 3260 errexit = true; 3261 break; 3262 } 3263 cond_resched(); 3264 kthread_stop(tsp); 3265 n_read_exits++; 3266 } 3267 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3268 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3269 i = 0; 3270 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3271 schedule_timeout_uninterruptible(HZ); 3272 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3273 3274 // Clean up and exit. 3275 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3276 smp_mb(); // Store before wakeup. 3277 wake_up(&read_exit_wq); 3278 while (!torture_must_stop()) 3279 schedule_timeout_uninterruptible(HZ / 20); 3280 torture_kthread_stopping("rcu_torture_read_exit"); 3281 return 0; 3282 } 3283 3284 static int rcu_torture_read_exit_init(void) 3285 { 3286 if (read_exit_burst <= 0) 3287 return 0; 3288 init_waitqueue_head(&read_exit_wq); 3289 read_exit_child_stop = false; 3290 read_exit_child_stopped = false; 3291 return torture_create_kthread(rcu_torture_read_exit, NULL, 3292 read_exit_task); 3293 } 3294 3295 static void rcu_torture_read_exit_cleanup(void) 3296 { 3297 if (!read_exit_task) 3298 return; 3299 WRITE_ONCE(read_exit_child_stop, true); 3300 smp_mb(); // Above write before wait. 
3301 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3302 torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
3303 }
3304
3305 static void rcutorture_test_nmis(int n)
3306 {
3307 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3308 int cpu;
3309 int dumpcpu;
3310 int i;
3311
3312 for (i = 0; i < n; i++) {
3313 preempt_disable();
3314 cpu = smp_processor_id();
3315 dumpcpu = cpu + 1;
3316 if (dumpcpu >= nr_cpu_ids)
3317 dumpcpu = 0;
3318 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
3319 dump_cpu_task(dumpcpu);
3320 preempt_enable();
3321 schedule_timeout_uninterruptible(15 * HZ);
3322 }
3323 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3324 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
3325 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3326 }
3327
3328 static enum cpuhp_state rcutor_hp;
3329
3330 static void
3331 rcu_torture_cleanup(void)
3332 {
3333 int firsttime;
3334 int flags = 0;
3335 unsigned long gp_seq = 0;
3336 int i;
3337
3338 if (torture_cleanup_begin()) {
3339 if (cur_ops->cb_barrier != NULL) {
3340 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3341 cur_ops->cb_barrier();
3342 }
3343 rcu_gp_slow_unregister(NULL);
3344 return;
3345 }
3346 if (!cur_ops) {
3347 torture_cleanup_end();
3348 rcu_gp_slow_unregister(NULL);
3349 return;
3350 }
3351
3352 rcutorture_test_nmis(test_nmis);
3353
3354 if (cur_ops->gp_kthread_dbg)
3355 cur_ops->gp_kthread_dbg();
3356 rcu_torture_read_exit_cleanup();
3357 rcu_torture_barrier_cleanup();
3358 rcu_torture_fwd_prog_cleanup();
3359 torture_stop_kthread(rcu_torture_stall, stall_task);
3360 torture_stop_kthread(rcu_torture_writer, writer_task);
3361
3362 if (nocb_tasks) {
3363 for (i = 0; i < nrealnocbers; i++)
3364 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
3365 kfree(nocb_tasks);
3366 nocb_tasks = NULL;
3367 }
3368
3369 if (reader_tasks) {
3370 for (i = 0; i < nrealreaders; i++)
3371 torture_stop_kthread(rcu_torture_reader,
3372 reader_tasks[i]);
3373 kfree(reader_tasks);
3374 reader_tasks = NULL;
3375 }
3376 kfree(rcu_torture_reader_mbchk);
3377 rcu_torture_reader_mbchk = NULL;
3378
3379 if (fakewriter_tasks) {
3380 for (i = 0; i < nfakewriters; i++)
3381 torture_stop_kthread(rcu_torture_fakewriter,
3382 fakewriter_tasks[i]);
3383 kfree(fakewriter_tasks);
3384 fakewriter_tasks = NULL;
3385 }
3386
3387 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3388 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3389 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
3390 cur_ops->name, (long)gp_seq, flags,
3391 rcutorture_seq_diff(gp_seq, start_gp_seq));
3392 torture_stop_kthread(rcu_torture_stats, stats_task);
3393 torture_stop_kthread(rcu_torture_fqs, fqs_task);
3394 if (rcu_torture_can_boost() && rcutor_hp >= 0)
3395 cpuhp_remove_state(rcutor_hp);
3396
3397 /*
3398 * Wait for all RCU callbacks to fire, then do torture-type-specific
3399 * cleanup operations.
3400 */
3401 if (cur_ops->cb_barrier != NULL) {
3402 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3403 cur_ops->cb_barrier();
3404 }
3405 if (cur_ops->cleanup != NULL)
3406 cur_ops->cleanup();
3407
3408 rcu_torture_mem_dump_obj();
3409
3410 rcu_torture_stats_print(); /* -After- the stats thread is stopped!
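With rcu_torture_stats stopped above, this final report cannot interleave with a periodic report from that kthread.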
*/ 3411 3412 if (err_segs_recorded) { 3413 pr_alert("Failure/close-call rcutorture reader segments:\n"); 3414 if (rt_read_nsegs == 0) 3415 pr_alert("\t: No segments recorded!!!\n"); 3416 firsttime = 1; 3417 for (i = 0; i < rt_read_nsegs; i++) { 3418 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 3419 if (err_segs[i].rt_delay_jiffies != 0) { 3420 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 3421 err_segs[i].rt_delay_jiffies); 3422 firsttime = 0; 3423 } 3424 if (err_segs[i].rt_delay_ms != 0) { 3425 pr_cont("%s%ldms", firsttime ? "" : "+", 3426 err_segs[i].rt_delay_ms); 3427 firsttime = 0; 3428 } 3429 if (err_segs[i].rt_delay_us != 0) { 3430 pr_cont("%s%ldus", firsttime ? "" : "+", 3431 err_segs[i].rt_delay_us); 3432 firsttime = 0; 3433 } 3434 pr_cont("%s\n", 3435 err_segs[i].rt_preempted ? "preempted" : ""); 3436 3437 } 3438 } 3439 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 3440 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 3441 else if (torture_onoff_failures()) 3442 rcu_torture_print_module_parms(cur_ops, 3443 "End of test: RCU_HOTPLUG"); 3444 else 3445 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 3446 torture_cleanup_end(); 3447 rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay); 3448 } 3449 3450 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3451 static void rcu_torture_leak_cb(struct rcu_head *rhp) 3452 { 3453 } 3454 3455 static void rcu_torture_err_cb(struct rcu_head *rhp) 3456 { 3457 /* 3458 * This -might- happen due to race conditions, but is unlikely. 3459 * The scenario that leads to this happening is that the 3460 * first of the pair of duplicate callbacks is queued, 3461 * someone else starts a grace period that includes that 3462 * callback, then the second of the pair must wait for the 3463 * next grace period. Unlikely, but can happen. If it 3464 * does happen, the debug-objects subsystem won't have splatted. 3465 */ 3466 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 3467 } 3468 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3469 3470 /* 3471 * Verify that double-free causes debug-objects to complain, but only 3472 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 3473 * cannot be carried out. 3474 */ 3475 static void rcu_test_debug_objects(void) 3476 { 3477 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3478 struct rcu_head rh1; 3479 struct rcu_head rh2; 3480 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 3481 3482 init_rcu_head_on_stack(&rh1); 3483 init_rcu_head_on_stack(&rh2); 3484 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 3485 3486 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 3487 preempt_disable(); /* Prevent preemption from interrupting test. */ 3488 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 3489 call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 3490 local_irq_disable(); /* Make it harder to start a new grace period. */ 3491 call_rcu_hurry(&rh2, rcu_torture_leak_cb); 3492 call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 3493 if (rhp) { 3494 call_rcu_hurry(rhp, rcu_torture_leak_cb); 3495 call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 3496 } 3497 local_irq_enable(); 3498 rcu_read_unlock(); 3499 preempt_enable(); 3500 3501 /* Wait for them all to get done so we can safely return. 
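rh1 and rh2 live on this function's stack and rhp is freed below, so every callback queued above, including the intentional duplicates, must have been invoked first; the rcu_barrier() below waits for exactly that.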
*/ 3502 rcu_barrier(); 3503 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 3504 destroy_rcu_head_on_stack(&rh1); 3505 destroy_rcu_head_on_stack(&rh2); 3506 kfree(rhp); 3507 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3508 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 3509 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3510 } 3511 3512 static void rcutorture_sync(void) 3513 { 3514 static unsigned long n; 3515 3516 if (cur_ops->sync && !(++n & 0xfff)) 3517 cur_ops->sync(); 3518 } 3519 3520 static DEFINE_MUTEX(mut0); 3521 static DEFINE_MUTEX(mut1); 3522 static DEFINE_MUTEX(mut2); 3523 static DEFINE_MUTEX(mut3); 3524 static DEFINE_MUTEX(mut4); 3525 static DEFINE_MUTEX(mut5); 3526 static DEFINE_MUTEX(mut6); 3527 static DEFINE_MUTEX(mut7); 3528 static DEFINE_MUTEX(mut8); 3529 static DEFINE_MUTEX(mut9); 3530 3531 static DECLARE_RWSEM(rwsem0); 3532 static DECLARE_RWSEM(rwsem1); 3533 static DECLARE_RWSEM(rwsem2); 3534 static DECLARE_RWSEM(rwsem3); 3535 static DECLARE_RWSEM(rwsem4); 3536 static DECLARE_RWSEM(rwsem5); 3537 static DECLARE_RWSEM(rwsem6); 3538 static DECLARE_RWSEM(rwsem7); 3539 static DECLARE_RWSEM(rwsem8); 3540 static DECLARE_RWSEM(rwsem9); 3541 3542 DEFINE_STATIC_SRCU(srcu0); 3543 DEFINE_STATIC_SRCU(srcu1); 3544 DEFINE_STATIC_SRCU(srcu2); 3545 DEFINE_STATIC_SRCU(srcu3); 3546 DEFINE_STATIC_SRCU(srcu4); 3547 DEFINE_STATIC_SRCU(srcu5); 3548 DEFINE_STATIC_SRCU(srcu6); 3549 DEFINE_STATIC_SRCU(srcu7); 3550 DEFINE_STATIC_SRCU(srcu8); 3551 DEFINE_STATIC_SRCU(srcu9); 3552 3553 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, 3554 int cyclelen, int deadlock) 3555 { 3556 int j = i + 1; 3557 3558 if (j >= cyclelen) 3559 j = deadlock ? 0 : -1; 3560 if (j >= 0) 3561 pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i); 3562 else 3563 pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i); 3564 return j; 3565 } 3566 3567 // Test lockdep on SRCU-based deadlock scenarios. 3568 static void rcu_torture_init_srcu_lockdep(void) 3569 { 3570 int cyclelen; 3571 int deadlock; 3572 bool err = false; 3573 int i; 3574 int j; 3575 int idx; 3576 struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, 3577 &mut5, &mut6, &mut7, &mut8, &mut9 }; 3578 struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, 3579 &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; 3580 struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, 3581 &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; 3582 int testtype; 3583 3584 if (!test_srcu_lockdep) 3585 return; 3586 3587 deadlock = test_srcu_lockdep / 1000; 3588 testtype = (test_srcu_lockdep / 10) % 100; 3589 cyclelen = test_srcu_lockdep % 10; 3590 WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); 3591 if (WARN_ONCE(deadlock != !!deadlock, 3592 "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n", 3593 __func__, test_srcu_lockdep, deadlock)) 3594 err = true; 3595 if (WARN_ONCE(cyclelen <= 0, 3596 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", 3597 __func__, test_srcu_lockdep, cyclelen)) 3598 err = true; 3599 if (err) 3600 goto err_out; 3601 3602 if (testtype == 0) { 3603 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", 3604 __func__, test_srcu_lockdep, cyclelen, deadlock ? 
"" : "non-"); 3605 if (deadlock && cyclelen == 1) 3606 pr_info("%s: Expect hang.\n", __func__); 3607 for (i = 0; i < cyclelen; i++) { 3608 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 3609 "srcu_read_unlock", i, cyclelen, deadlock); 3610 idx = srcu_read_lock(srcus[i]); 3611 if (j >= 0) 3612 synchronize_srcu(srcus[j]); 3613 srcu_read_unlock(srcus[i], idx); 3614 } 3615 return; 3616 } 3617 3618 if (testtype == 1) { 3619 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 3620 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3621 for (i = 0; i < cyclelen; i++) { 3622 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 3623 __func__, i, i, i, i); 3624 idx = srcu_read_lock(srcus[i]); 3625 mutex_lock(muts[i]); 3626 mutex_unlock(muts[i]); 3627 srcu_read_unlock(srcus[i], idx); 3628 3629 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 3630 "mutex_unlock", i, cyclelen, deadlock); 3631 mutex_lock(muts[i]); 3632 if (j >= 0) 3633 synchronize_srcu(srcus[j]); 3634 mutex_unlock(muts[i]); 3635 } 3636 return; 3637 } 3638 3639 if (testtype == 2) { 3640 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 3641 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3642 for (i = 0; i < cyclelen; i++) { 3643 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 3644 __func__, i, i, i, i); 3645 idx = srcu_read_lock(srcus[i]); 3646 down_read(rwsems[i]); 3647 up_read(rwsems[i]); 3648 srcu_read_unlock(srcus[i], idx); 3649 3650 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 3651 "up_write", i, cyclelen, deadlock); 3652 down_write(rwsems[i]); 3653 if (j >= 0) 3654 synchronize_srcu(srcus[j]); 3655 up_write(rwsems[i]); 3656 } 3657 return; 3658 } 3659 3660 #ifdef CONFIG_TASKS_TRACE_RCU 3661 if (testtype == 3) { 3662 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 3663 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3664 if (deadlock && cyclelen == 1) 3665 pr_info("%s: Expect hang.\n", __func__); 3666 for (i = 0; i < cyclelen; i++) { 3667 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 3668 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 3669 : "synchronize_srcu"; 3670 char *fu = i == 0 ? 
"rcu_read_unlock_trace" : "srcu_read_unlock"; 3671 3672 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 3673 if (i == 0) 3674 rcu_read_lock_trace(); 3675 else 3676 idx = srcu_read_lock(srcus[i]); 3677 if (j >= 0) { 3678 if (i == cyclelen - 1) 3679 synchronize_rcu_tasks_trace(); 3680 else 3681 synchronize_srcu(srcus[j]); 3682 } 3683 if (i == 0) 3684 rcu_read_unlock_trace(); 3685 else 3686 srcu_read_unlock(srcus[i], idx); 3687 } 3688 return; 3689 } 3690 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 3691 3692 err_out: 3693 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 3694 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 3695 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 3696 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 3697 pr_info("%s: L: Cycle length.\n", __func__); 3698 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 3699 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 3700 } 3701 3702 static int __init 3703 rcu_torture_init(void) 3704 { 3705 long i; 3706 int cpu; 3707 int firsterr = 0; 3708 int flags = 0; 3709 unsigned long gp_seq = 0; 3710 static struct rcu_torture_ops *torture_ops[] = { 3711 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 3712 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 3713 &trivial_ops, 3714 }; 3715 3716 if (!torture_init_begin(torture_type, verbose)) 3717 return -EBUSY; 3718 3719 /* Process args and tell the world that the torturer is on the job. */ 3720 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 3721 cur_ops = torture_ops[i]; 3722 if (strcmp(torture_type, cur_ops->name) == 0) 3723 break; 3724 } 3725 if (i == ARRAY_SIZE(torture_ops)) { 3726 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 3727 torture_type); 3728 pr_alert("rcu-torture types:"); 3729 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 3730 pr_cont(" %s", torture_ops[i]->name); 3731 pr_cont("\n"); 3732 firsterr = -EINVAL; 3733 cur_ops = NULL; 3734 goto unwind; 3735 } 3736 if (cur_ops->fqs == NULL && fqs_duration != 0) { 3737 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 3738 fqs_duration = 0; 3739 } 3740 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 3741 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3742 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 3743 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 3744 nocbs_nthreads = 0; 3745 } 3746 if (cur_ops->init) 3747 cur_ops->init(); 3748 3749 rcu_torture_init_srcu_lockdep(); 3750 3751 if (nreaders >= 0) { 3752 nrealreaders = nreaders; 3753 } else { 3754 nrealreaders = num_online_cpus() - 2 - nreaders; 3755 if (nrealreaders <= 0) 3756 nrealreaders = 1; 3757 } 3758 rcu_torture_print_module_parms(cur_ops, "Start of test"); 3759 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 3760 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 3761 start_gp_seq = gp_seq; 3762 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 3763 cur_ops->name, (long)gp_seq, flags); 3764 3765 /* Set up the freelist. */ 3766 3767 INIT_LIST_HEAD(&rcu_torture_freelist); 3768 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 3769 rcu_tortures[i].rtort_mbtest = 0; 3770 list_add_tail(&rcu_tortures[i].rtort_free, 3771 &rcu_torture_freelist); 3772 } 3773 3774 /* Initialize the statistics so that each run gets its own numbers. 
*/ 3775 3776 rcu_torture_current = NULL; 3777 rcu_torture_current_version = 0; 3778 atomic_set(&n_rcu_torture_alloc, 0); 3779 atomic_set(&n_rcu_torture_alloc_fail, 0); 3780 atomic_set(&n_rcu_torture_free, 0); 3781 atomic_set(&n_rcu_torture_mberror, 0); 3782 atomic_set(&n_rcu_torture_mbchk_fail, 0); 3783 atomic_set(&n_rcu_torture_mbchk_tries, 0); 3784 atomic_set(&n_rcu_torture_error, 0); 3785 n_rcu_torture_barrier_error = 0; 3786 n_rcu_torture_boost_ktrerror = 0; 3787 n_rcu_torture_boost_failure = 0; 3788 n_rcu_torture_boosts = 0; 3789 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 3790 atomic_set(&rcu_torture_wcount[i], 0); 3791 for_each_possible_cpu(cpu) { 3792 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 3793 per_cpu(rcu_torture_count, cpu)[i] = 0; 3794 per_cpu(rcu_torture_batch, cpu)[i] = 0; 3795 } 3796 } 3797 err_segs_recorded = 0; 3798 rt_read_nsegs = 0; 3799 3800 /* Start up the kthreads. */ 3801 3802 rcu_torture_write_types(); 3803 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 3804 writer_task); 3805 if (torture_init_error(firsterr)) 3806 goto unwind; 3807 if (nfakewriters > 0) { 3808 fakewriter_tasks = kcalloc(nfakewriters, 3809 sizeof(fakewriter_tasks[0]), 3810 GFP_KERNEL); 3811 if (fakewriter_tasks == NULL) { 3812 TOROUT_ERRSTRING("out of memory"); 3813 firsterr = -ENOMEM; 3814 goto unwind; 3815 } 3816 } 3817 for (i = 0; i < nfakewriters; i++) { 3818 firsterr = torture_create_kthread(rcu_torture_fakewriter, 3819 NULL, fakewriter_tasks[i]); 3820 if (torture_init_error(firsterr)) 3821 goto unwind; 3822 } 3823 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 3824 GFP_KERNEL); 3825 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 3826 GFP_KERNEL); 3827 if (!reader_tasks || !rcu_torture_reader_mbchk) { 3828 TOROUT_ERRSTRING("out of memory"); 3829 firsterr = -ENOMEM; 3830 goto unwind; 3831 } 3832 for (i = 0; i < nrealreaders; i++) { 3833 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 3834 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 3835 reader_tasks[i]); 3836 if (torture_init_error(firsterr)) 3837 goto unwind; 3838 } 3839 nrealnocbers = nocbs_nthreads; 3840 if (WARN_ON(nrealnocbers < 0)) 3841 nrealnocbers = 1; 3842 if (WARN_ON(nocbs_toggle < 0)) 3843 nocbs_toggle = HZ; 3844 if (nrealnocbers > 0) { 3845 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 3846 if (nocb_tasks == NULL) { 3847 TOROUT_ERRSTRING("out of memory"); 3848 firsterr = -ENOMEM; 3849 goto unwind; 3850 } 3851 } else { 3852 nocb_tasks = NULL; 3853 } 3854 for (i = 0; i < nrealnocbers; i++) { 3855 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 3856 if (torture_init_error(firsterr)) 3857 goto unwind; 3858 } 3859 if (stat_interval > 0) { 3860 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 3861 stats_task); 3862 if (torture_init_error(firsterr)) 3863 goto unwind; 3864 } 3865 if (test_no_idle_hz && shuffle_interval > 0) { 3866 firsterr = torture_shuffle_init(shuffle_interval * HZ); 3867 if (torture_init_error(firsterr)) 3868 goto unwind; 3869 } 3870 if (stutter < 0) 3871 stutter = 0; 3872 if (stutter) { 3873 int t; 3874 3875 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 3876 firsterr = torture_stutter_init(stutter * HZ, t); 3877 if (torture_init_error(firsterr)) 3878 goto unwind; 3879 } 3880 if (fqs_duration < 0) 3881 fqs_duration = 0; 3882 if (fqs_holdoff < 0) 3883 fqs_holdoff = 0; 3884 if (fqs_duration && fqs_holdoff) { 3885 /* Create the fqs thread */ 3886 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 3887 fqs_task); 3888 if (torture_init_error(firsterr)) 3889 goto unwind; 3890 } 3891 if (test_boost_interval < 1) 3892 test_boost_interval = 1; 3893 if (test_boost_duration < 2) 3894 test_boost_duration = 2; 3895 if (rcu_torture_can_boost()) { 3896 3897 boost_starttime = jiffies + test_boost_interval * HZ; 3898 3899 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 3900 rcutorture_booster_init, 3901 rcutorture_booster_cleanup); 3902 rcutor_hp = firsterr; 3903 if (torture_init_error(firsterr)) 3904 goto unwind; 3905 } 3906 shutdown_jiffies = jiffies + shutdown_secs * HZ; 3907 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 3908 if (torture_init_error(firsterr)) 3909 goto unwind; 3910 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 3911 rcutorture_sync); 3912 if (torture_init_error(firsterr)) 3913 goto unwind; 3914 firsterr = rcu_torture_stall_init(); 3915 if (torture_init_error(firsterr)) 3916 goto unwind; 3917 firsterr = rcu_torture_fwd_prog_init(); 3918 if (torture_init_error(firsterr)) 3919 goto unwind; 3920 firsterr = rcu_torture_barrier_init(); 3921 if (torture_init_error(firsterr)) 3922 goto unwind; 3923 firsterr = rcu_torture_read_exit_init(); 3924 if (torture_init_error(firsterr)) 3925 goto unwind; 3926 if (object_debug) 3927 rcu_test_debug_objects(); 3928 torture_init_end(); 3929 rcu_gp_slow_register(&rcu_fwd_cb_nodelay); 3930 return 0; 3931 3932 unwind: 3933 torture_init_end(); 3934 rcu_torture_cleanup(); 3935 if (shutdown_secs) { 3936 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 3937 kernel_power_off(); 3938 } 3939 return firsterr; 3940 } 3941 3942 module_init(rcu_torture_init); 3943 module_exit(rcu_torture_cleanup); 3944
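/*
 * Illustrative usage only; the parameter values below are arbitrary
 * examples rather than recommended settings.  When built as a module:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 n_barrier_cbs=4
 *
 * or, when built in, via boot parameters such as:
 *
 *	rcutorture.torture_type=rcu rcutorture.fwd_progress=2
 *
 * Worked example of the "DNNL" encoding decoded by
 * rcu_torture_init_srcu_lockdep(): test_srcu_lockdep=1012 selects D=1
 * (expect deadlock), NN=01 (SRCU/mutex), L=2 (two-way cycle), while
 * test_srcu_lockdep=23 selects a non-deadlocking three-way SRCU/rwsem
 * chain.  See tools/testing/selftests/rcutorture/ for the scripted
 * test workflows.
 */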