1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/rcu_notifier.h> 25 #include <linux/interrupt.h> 26 #include <linux/sched/signal.h> 27 #include <uapi/linux/sched/types.h> 28 #include <linux/atomic.h> 29 #include <linux/bitops.h> 30 #include <linux/completion.h> 31 #include <linux/moduleparam.h> 32 #include <linux/percpu.h> 33 #include <linux/notifier.h> 34 #include <linux/reboot.h> 35 #include <linux/freezer.h> 36 #include <linux/cpu.h> 37 #include <linux/delay.h> 38 #include <linux/stat.h> 39 #include <linux/srcu.h> 40 #include <linux/slab.h> 41 #include <linux/trace_clock.h> 42 #include <asm/byteorder.h> 43 #include <linux/torture.h> 44 #include <linux/vmalloc.h> 45 #include <linux/sched/debug.h> 46 #include <linux/sched/sysctl.h> 47 #include <linux/oom.h> 48 #include <linux/tick.h> 49 #include <linux/rcupdate_trace.h> 50 #include <linux/nmi.h> 51 52 #include "rcu.h" 53 54 MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility"); 55 MODULE_LICENSE("GPL"); 56 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 57 58 /* Bits for ->extendables field, extendables param, and related definitions. */ 59 #define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */ 60 #define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1) 61 #define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */ 62 #define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2) 63 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 64 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 65 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 66 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 67 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 68 #define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */ 69 #define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */ 70 #define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */ 71 #define RCUTORTURE_MAX_EXTEND \ 72 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 73 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 74 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 75 /* Must be power of two minus one. 
*/ 76 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) 77 78 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, 79 "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); 80 torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable"); 81 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 82 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); 83 torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)"); 84 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); 85 torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)"); 86 torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()"); 87 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); 88 torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives"); 89 torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives"); 90 torture_param(bool, gp_cond_exp_full, false, 91 "Use conditional/async full-stateexpedited GP wait primitives"); 92 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 93 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives"); 94 torture_param(bool, gp_poll, false, "Use polling GP wait primitives"); 95 torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives"); 96 torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives"); 97 torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives"); 98 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives"); 99 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); 100 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers"); 101 torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing"); 102 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); 103 torture_param(int, nreaders, -1, "Number of RCU reader threads"); 104 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing"); 105 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 106 torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable"); 107 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable"); 108 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)"); 109 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)"); 110 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable"); 111 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 112 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 113 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 114 torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s)."); 115 torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall."); 116 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 117 torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); 118 torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one."); 119 torture_param(int, 
stall_gp_kthread, 0, "Grace-period kthread stall duration (s)."); 120 torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s"); 121 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 122 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 123 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds."); 124 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds."); 125 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable."); 126 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs"); 127 torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario."); 128 torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); 129 130 static char *torture_type = "rcu"; 131 module_param(torture_type, charp, 0444); 132 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 133 134 static int nrealnocbers; 135 static int nrealreaders; 136 static struct task_struct *writer_task; 137 static struct task_struct **fakewriter_tasks; 138 static struct task_struct **reader_tasks; 139 static struct task_struct **nocb_tasks; 140 static struct task_struct *stats_task; 141 static struct task_struct *fqs_task; 142 static struct task_struct *boost_tasks[NR_CPUS]; 143 static struct task_struct *stall_task; 144 static struct task_struct **fwd_prog_tasks; 145 static struct task_struct **barrier_cbs_tasks; 146 static struct task_struct *barrier_task; 147 static struct task_struct *read_exit_task; 148 149 #define RCU_TORTURE_PIPE_LEN 10 150 151 // Mailbox-like structure to check RCU global memory ordering. 152 struct rcu_torture_reader_check { 153 unsigned long rtc_myloops; 154 int rtc_chkrdr; 155 unsigned long rtc_chkloops; 156 int rtc_ready; 157 struct rcu_torture_reader_check *rtc_assigner; 158 } ____cacheline_internodealigned_in_smp; 159 160 // Update-side data structure used to check RCU readers. 161 struct rcu_torture { 162 struct rcu_head rtort_rcu; 163 int rtort_pipe_count; 164 struct list_head rtort_free; 165 int rtort_mbtest; 166 struct rcu_torture_reader_check *rtort_chkp; 167 }; 168 169 static LIST_HEAD(rcu_torture_freelist); 170 static struct rcu_torture __rcu *rcu_torture_current; 171 static unsigned long rcu_torture_current_version; 172 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 173 static DEFINE_SPINLOCK(rcu_torture_lock); 174 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 175 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 176 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 177 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 178 static atomic_t n_rcu_torture_alloc; 179 static atomic_t n_rcu_torture_alloc_fail; 180 static atomic_t n_rcu_torture_free; 181 static atomic_t n_rcu_torture_mberror; 182 static atomic_t n_rcu_torture_mbchk_fail; 183 static atomic_t n_rcu_torture_mbchk_tries; 184 static atomic_t n_rcu_torture_error; 185 static long n_rcu_torture_barrier_error; 186 static long n_rcu_torture_boost_ktrerror; 187 static long n_rcu_torture_boost_failure; 188 static long n_rcu_torture_boosts; 189 static atomic_long_t n_rcu_torture_timers; 190 static long n_barrier_attempts; 191 static long n_barrier_successes; /* did rcu_barrier test succeed? 
*/ 192 static unsigned long n_read_exits; 193 static struct list_head rcu_torture_removed; 194 static unsigned long shutdown_jiffies; 195 static unsigned long start_gp_seq; 196 static atomic_long_t n_nocb_offload; 197 static atomic_long_t n_nocb_deoffload; 198 199 static int rcu_torture_writer_state; 200 #define RTWS_FIXED_DELAY 0 201 #define RTWS_DELAY 1 202 #define RTWS_REPLACE 2 203 #define RTWS_DEF_FREE 3 204 #define RTWS_EXP_SYNC 4 205 #define RTWS_COND_GET 5 206 #define RTWS_COND_GET_FULL 6 207 #define RTWS_COND_GET_EXP 7 208 #define RTWS_COND_GET_EXP_FULL 8 209 #define RTWS_COND_SYNC 9 210 #define RTWS_COND_SYNC_FULL 10 211 #define RTWS_COND_SYNC_EXP 11 212 #define RTWS_COND_SYNC_EXP_FULL 12 213 #define RTWS_POLL_GET 13 214 #define RTWS_POLL_GET_FULL 14 215 #define RTWS_POLL_GET_EXP 15 216 #define RTWS_POLL_GET_EXP_FULL 16 217 #define RTWS_POLL_WAIT 17 218 #define RTWS_POLL_WAIT_FULL 18 219 #define RTWS_POLL_WAIT_EXP 19 220 #define RTWS_POLL_WAIT_EXP_FULL 20 221 #define RTWS_SYNC 21 222 #define RTWS_STUTTER 22 223 #define RTWS_STOPPING 23 224 static const char * const rcu_torture_writer_state_names[] = { 225 "RTWS_FIXED_DELAY", 226 "RTWS_DELAY", 227 "RTWS_REPLACE", 228 "RTWS_DEF_FREE", 229 "RTWS_EXP_SYNC", 230 "RTWS_COND_GET", 231 "RTWS_COND_GET_FULL", 232 "RTWS_COND_GET_EXP", 233 "RTWS_COND_GET_EXP_FULL", 234 "RTWS_COND_SYNC", 235 "RTWS_COND_SYNC_FULL", 236 "RTWS_COND_SYNC_EXP", 237 "RTWS_COND_SYNC_EXP_FULL", 238 "RTWS_POLL_GET", 239 "RTWS_POLL_GET_FULL", 240 "RTWS_POLL_GET_EXP", 241 "RTWS_POLL_GET_EXP_FULL", 242 "RTWS_POLL_WAIT", 243 "RTWS_POLL_WAIT_FULL", 244 "RTWS_POLL_WAIT_EXP", 245 "RTWS_POLL_WAIT_EXP_FULL", 246 "RTWS_SYNC", 247 "RTWS_STUTTER", 248 "RTWS_STOPPING", 249 }; 250 251 /* Record reader segment types and duration for first failing read. */ 252 struct rt_read_seg { 253 int rt_readstate; 254 unsigned long rt_delay_jiffies; 255 unsigned long rt_delay_ms; 256 unsigned long rt_delay_us; 257 bool rt_preempted; 258 }; 259 static int err_segs_recorded; 260 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 261 static int rt_read_nsegs; 262 263 static const char *rcu_torture_writer_state_getname(void) 264 { 265 unsigned int i = READ_ONCE(rcu_torture_writer_state); 266 267 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 268 return "???"; 269 return rcu_torture_writer_state_names[i]; 270 } 271 272 #ifdef CONFIG_RCU_TRACE 273 static u64 notrace rcu_trace_clock_local(void) 274 { 275 u64 ts = trace_clock_local(); 276 277 (void)do_div(ts, NSEC_PER_USEC); 278 return ts; 279 } 280 #else /* #ifdef CONFIG_RCU_TRACE */ 281 static u64 notrace rcu_trace_clock_local(void) 282 { 283 return 0ULL; 284 } 285 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 286 287 /* 288 * Stop aggressive CPU-hog tests a bit before the end of the test in order 289 * to avoid interfering with test shutdown. 290 */ 291 static bool shutdown_time_arrived(void) 292 { 293 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 294 } 295 296 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 297 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 298 /* and boost task create/destroy. */ 299 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 300 static bool barrier_phase; /* Test phase. */ 301 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 302 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. 
*/ 303 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 304 305 static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 306 307 /* 308 * Allocate an element from the rcu_tortures pool. 309 */ 310 static struct rcu_torture * 311 rcu_torture_alloc(void) 312 { 313 struct list_head *p; 314 315 spin_lock_bh(&rcu_torture_lock); 316 if (list_empty(&rcu_torture_freelist)) { 317 atomic_inc(&n_rcu_torture_alloc_fail); 318 spin_unlock_bh(&rcu_torture_lock); 319 return NULL; 320 } 321 atomic_inc(&n_rcu_torture_alloc); 322 p = rcu_torture_freelist.next; 323 list_del_init(p); 324 spin_unlock_bh(&rcu_torture_lock); 325 return container_of(p, struct rcu_torture, rtort_free); 326 } 327 328 /* 329 * Free an element to the rcu_tortures pool. 330 */ 331 static void 332 rcu_torture_free(struct rcu_torture *p) 333 { 334 atomic_inc(&n_rcu_torture_free); 335 spin_lock_bh(&rcu_torture_lock); 336 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 337 spin_unlock_bh(&rcu_torture_lock); 338 } 339 340 /* 341 * Operations vector for selecting different types of tests. 342 */ 343 344 struct rcu_torture_ops { 345 int ttype; 346 void (*init)(void); 347 void (*cleanup)(void); 348 int (*readlock)(void); 349 void (*read_delay)(struct torture_random_state *rrsp, 350 struct rt_read_seg *rtrsp); 351 void (*readunlock)(int idx); 352 int (*readlock_held)(void); 353 unsigned long (*get_gp_seq)(void); 354 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 355 void (*deferred_free)(struct rcu_torture *p); 356 void (*sync)(void); 357 void (*exp_sync)(void); 358 unsigned long (*get_gp_state_exp)(void); 359 unsigned long (*start_gp_poll_exp)(void); 360 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp); 361 bool (*poll_gp_state_exp)(unsigned long oldstate); 362 void (*cond_sync_exp)(unsigned long oldstate); 363 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp); 364 unsigned long (*get_comp_state)(void); 365 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp); 366 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2); 367 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2); 368 unsigned long (*get_gp_state)(void); 369 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp); 370 unsigned long (*start_gp_poll)(void); 371 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp); 372 bool (*poll_gp_state)(unsigned long oldstate); 373 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp); 374 bool (*poll_need_2gp)(bool poll, bool poll_full); 375 void (*cond_sync)(unsigned long oldstate); 376 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp); 377 int poll_active; 378 int poll_active_full; 379 call_rcu_func_t call; 380 void (*cb_barrier)(void); 381 void (*fqs)(void); 382 void (*stats)(void); 383 void (*gp_kthread_dbg)(void); 384 bool (*check_boost_failed)(unsigned long gp_state, int *cpup); 385 int (*stall_dur)(void); 386 void (*get_gp_data)(int *flags, unsigned long *gp_seq); 387 void (*gp_slow_register)(atomic_t *rgssp); 388 void (*gp_slow_unregister)(atomic_t *rgssp); 389 long cbflood_max; 390 int irq_capable; 391 int can_boost; 392 int extendables; 393 int slow_gps; 394 int no_pi_lock; 395 int debug_objects; 396 const char *name; 397 }; 398 399 static struct rcu_torture_ops *cur_ops; 400 401 /* 402 * Definitions for rcu torture testing. 
403 */ 404 405 static int torture_readlock_not_held(void) 406 { 407 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); 408 } 409 410 static int rcu_torture_read_lock(void) 411 { 412 rcu_read_lock(); 413 return 0; 414 } 415 416 static void 417 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 418 { 419 unsigned long started; 420 unsigned long completed; 421 const unsigned long shortdelay_us = 200; 422 unsigned long longdelay_ms = 300; 423 unsigned long long ts; 424 425 /* We want a short delay sometimes to make a reader delay the grace 426 * period, and we want a long delay occasionally to trigger 427 * force_quiescent_state. */ 428 429 if (!atomic_read(&rcu_fwd_cb_nodelay) && 430 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 431 started = cur_ops->get_gp_seq(); 432 ts = rcu_trace_clock_local(); 433 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 434 longdelay_ms = 5; /* Avoid triggering BH limits. */ 435 mdelay(longdelay_ms); 436 rtrsp->rt_delay_ms = longdelay_ms; 437 completed = cur_ops->get_gp_seq(); 438 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 439 started, completed); 440 } 441 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 442 udelay(shortdelay_us); 443 rtrsp->rt_delay_us = shortdelay_us; 444 } 445 if (!preempt_count() && 446 !(torture_random(rrsp) % (nrealreaders * 500))) { 447 torture_preempt_schedule(); /* QS only if preemptible. */ 448 rtrsp->rt_preempted = true; 449 } 450 } 451 452 static void rcu_torture_read_unlock(int idx) 453 { 454 rcu_read_unlock(); 455 } 456 457 /* 458 * Update callback in the pipe. This should be invoked after a grace period. 459 */ 460 static bool 461 rcu_torture_pipe_update_one(struct rcu_torture *rp) 462 { 463 int i; 464 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); 465 466 if (rtrcp) { 467 WRITE_ONCE(rp->rtort_chkp, NULL); 468 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). 469 } 470 i = rp->rtort_pipe_count; 471 if (i > RCU_TORTURE_PIPE_LEN) 472 i = RCU_TORTURE_PIPE_LEN; 473 atomic_inc(&rcu_torture_wcount[i]); 474 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 475 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); 476 if (i + 1 >= RCU_TORTURE_PIPE_LEN) { 477 rp->rtort_mbtest = 0; 478 return true; 479 } 480 return false; 481 } 482 483 /* 484 * Update all callbacks in the pipe. Suitable for synchronous grace-period 485 * primitives. 486 */ 487 static void 488 rcu_torture_pipe_update(struct rcu_torture *old_rp) 489 { 490 struct rcu_torture *rp; 491 struct rcu_torture *rp1; 492 493 if (old_rp) 494 list_add(&old_rp->rtort_free, &rcu_torture_removed); 495 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 496 if (rcu_torture_pipe_update_one(rp)) { 497 list_del(&rp->rtort_free); 498 rcu_torture_free(rp); 499 } 500 } 501 } 502 503 static void 504 rcu_torture_cb(struct rcu_head *p) 505 { 506 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 507 508 if (torture_must_stop_irq()) { 509 /* Test is ending, just drop callbacks on the floor. */ 510 /* The next initialization will pick up the pieces. 
*/ 511 return; 512 } 513 if (rcu_torture_pipe_update_one(rp)) 514 rcu_torture_free(rp); 515 else 516 cur_ops->deferred_free(rp); 517 } 518 519 static unsigned long rcu_no_completed(void) 520 { 521 return 0; 522 } 523 524 static void rcu_torture_deferred_free(struct rcu_torture *p) 525 { 526 call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb); 527 } 528 529 static void rcu_sync_torture_init(void) 530 { 531 INIT_LIST_HEAD(&rcu_torture_removed); 532 } 533 534 static bool rcu_poll_need_2gp(bool poll, bool poll_full) 535 { 536 return poll; 537 } 538 539 static struct rcu_torture_ops rcu_ops = { 540 .ttype = RCU_FLAVOR, 541 .init = rcu_sync_torture_init, 542 .readlock = rcu_torture_read_lock, 543 .read_delay = rcu_read_delay, 544 .readunlock = rcu_torture_read_unlock, 545 .readlock_held = torture_readlock_not_held, 546 .get_gp_seq = rcu_get_gp_seq, 547 .gp_diff = rcu_seq_diff, 548 .deferred_free = rcu_torture_deferred_free, 549 .sync = synchronize_rcu, 550 .exp_sync = synchronize_rcu_expedited, 551 .same_gp_state = same_state_synchronize_rcu, 552 .same_gp_state_full = same_state_synchronize_rcu_full, 553 .get_comp_state = get_completed_synchronize_rcu, 554 .get_comp_state_full = get_completed_synchronize_rcu_full, 555 .get_gp_state = get_state_synchronize_rcu, 556 .get_gp_state_full = get_state_synchronize_rcu_full, 557 .start_gp_poll = start_poll_synchronize_rcu, 558 .start_gp_poll_full = start_poll_synchronize_rcu_full, 559 .poll_gp_state = poll_state_synchronize_rcu, 560 .poll_gp_state_full = poll_state_synchronize_rcu_full, 561 .poll_need_2gp = rcu_poll_need_2gp, 562 .cond_sync = cond_synchronize_rcu, 563 .cond_sync_full = cond_synchronize_rcu_full, 564 .poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE, 565 .poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE, 566 .get_gp_state_exp = get_state_synchronize_rcu, 567 .start_gp_poll_exp = start_poll_synchronize_rcu_expedited, 568 .start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full, 569 .poll_gp_state_exp = poll_state_synchronize_rcu, 570 .cond_sync_exp = cond_synchronize_rcu_expedited, 571 .call = call_rcu_hurry, 572 .cb_barrier = rcu_barrier, 573 .fqs = rcu_force_quiescent_state, 574 .gp_kthread_dbg = show_rcu_gp_kthreads, 575 .check_boost_failed = rcu_check_boost_fail, 576 .stall_dur = rcu_jiffies_till_stall_check, 577 .get_gp_data = rcutorture_get_gp_data, 578 .gp_slow_register = rcu_gp_slow_register, 579 .gp_slow_unregister = rcu_gp_slow_unregister, 580 .irq_capable = 1, 581 .can_boost = IS_ENABLED(CONFIG_RCU_BOOST), 582 .extendables = RCUTORTURE_MAX_EXTEND, 583 .debug_objects = 1, 584 .name = "rcu" 585 }; 586 587 /* 588 * Don't even think about trying any of these in real life!!! 589 * The names includes "busted", and they really means it! 590 * The only purpose of these functions is to provide a buggy RCU 591 * implementation to make sure that rcutorture correctly emits 592 * buggy-RCU error messages. 593 */ 594 static void rcu_busted_torture_deferred_free(struct rcu_torture *p) 595 { 596 /* This is a deliberate bug for testing purposes only! */ 597 rcu_torture_cb(&p->rtort_rcu); 598 } 599 600 static void synchronize_rcu_busted(void) 601 { 602 /* This is a deliberate bug for testing purposes only! */ 603 } 604 605 static void 606 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 607 { 608 /* This is a deliberate bug for testing purposes only! 
*/ 609 func(head); 610 } 611 612 static struct rcu_torture_ops rcu_busted_ops = { 613 .ttype = INVALID_RCU_FLAVOR, 614 .init = rcu_sync_torture_init, 615 .readlock = rcu_torture_read_lock, 616 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 617 .readunlock = rcu_torture_read_unlock, 618 .readlock_held = torture_readlock_not_held, 619 .get_gp_seq = rcu_no_completed, 620 .deferred_free = rcu_busted_torture_deferred_free, 621 .sync = synchronize_rcu_busted, 622 .exp_sync = synchronize_rcu_busted, 623 .call = call_rcu_busted, 624 .irq_capable = 1, 625 .name = "busted" 626 }; 627 628 /* 629 * Definitions for srcu torture testing. 630 */ 631 632 DEFINE_STATIC_SRCU(srcu_ctl); 633 static struct srcu_struct srcu_ctld; 634 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 635 static struct rcu_torture_ops srcud_ops; 636 637 static void srcu_get_gp_data(int *flags, unsigned long *gp_seq) 638 { 639 srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq); 640 } 641 642 static int srcu_torture_read_lock(void) 643 { 644 if (cur_ops == &srcud_ops) 645 return srcu_read_lock_nmisafe(srcu_ctlp); 646 else 647 return srcu_read_lock(srcu_ctlp); 648 } 649 650 static void 651 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 652 { 653 long delay; 654 const long uspertick = 1000000 / HZ; 655 const long longdelay = 10; 656 657 /* We want there to be long-running readers, but not all the time. */ 658 659 delay = torture_random(rrsp) % 660 (nrealreaders * 2 * longdelay * uspertick); 661 if (!delay && in_task()) { 662 schedule_timeout_interruptible(longdelay); 663 rtrsp->rt_delay_jiffies = longdelay; 664 } else { 665 rcu_read_delay(rrsp, rtrsp); 666 } 667 } 668 669 static void srcu_torture_read_unlock(int idx) 670 { 671 if (cur_ops == &srcud_ops) 672 srcu_read_unlock_nmisafe(srcu_ctlp, idx); 673 else 674 srcu_read_unlock(srcu_ctlp, idx); 675 } 676 677 static int torture_srcu_read_lock_held(void) 678 { 679 return srcu_read_lock_held(srcu_ctlp); 680 } 681 682 static unsigned long srcu_torture_completed(void) 683 { 684 return srcu_batches_completed(srcu_ctlp); 685 } 686 687 static void srcu_torture_deferred_free(struct rcu_torture *rp) 688 { 689 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 690 } 691 692 static void srcu_torture_synchronize(void) 693 { 694 synchronize_srcu(srcu_ctlp); 695 } 696 697 static unsigned long srcu_torture_get_gp_state(void) 698 { 699 return get_state_synchronize_srcu(srcu_ctlp); 700 } 701 702 static unsigned long srcu_torture_start_gp_poll(void) 703 { 704 return start_poll_synchronize_srcu(srcu_ctlp); 705 } 706 707 static bool srcu_torture_poll_gp_state(unsigned long oldstate) 708 { 709 return poll_state_synchronize_srcu(srcu_ctlp, oldstate); 710 } 711 712 static void srcu_torture_call(struct rcu_head *head, 713 rcu_callback_t func) 714 { 715 call_srcu(srcu_ctlp, head, func); 716 } 717 718 static void srcu_torture_barrier(void) 719 { 720 srcu_barrier(srcu_ctlp); 721 } 722 723 static void srcu_torture_stats(void) 724 { 725 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 726 } 727 728 static void srcu_torture_synchronize_expedited(void) 729 { 730 synchronize_srcu_expedited(srcu_ctlp); 731 } 732 733 static struct rcu_torture_ops srcu_ops = { 734 .ttype = SRCU_FLAVOR, 735 .init = rcu_sync_torture_init, 736 .readlock = srcu_torture_read_lock, 737 .read_delay = srcu_read_delay, 738 .readunlock = srcu_torture_read_unlock, 739 .readlock_held = torture_srcu_read_lock_held, 740 .get_gp_seq = srcu_torture_completed, 741 .deferred_free = 
srcu_torture_deferred_free, 742 .sync = srcu_torture_synchronize, 743 .exp_sync = srcu_torture_synchronize_expedited, 744 .same_gp_state = same_state_synchronize_srcu, 745 .get_comp_state = get_completed_synchronize_srcu, 746 .get_gp_state = srcu_torture_get_gp_state, 747 .start_gp_poll = srcu_torture_start_gp_poll, 748 .poll_gp_state = srcu_torture_poll_gp_state, 749 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE, 750 .call = srcu_torture_call, 751 .cb_barrier = srcu_torture_barrier, 752 .stats = srcu_torture_stats, 753 .get_gp_data = srcu_get_gp_data, 754 .cbflood_max = 50000, 755 .irq_capable = 1, 756 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 757 .debug_objects = 1, 758 .name = "srcu" 759 }; 760 761 static void srcu_torture_init(void) 762 { 763 rcu_sync_torture_init(); 764 WARN_ON(init_srcu_struct(&srcu_ctld)); 765 srcu_ctlp = &srcu_ctld; 766 } 767 768 static void srcu_torture_cleanup(void) 769 { 770 cleanup_srcu_struct(&srcu_ctld); 771 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 772 } 773 774 /* As above, but dynamically allocated. */ 775 static struct rcu_torture_ops srcud_ops = { 776 .ttype = SRCU_FLAVOR, 777 .init = srcu_torture_init, 778 .cleanup = srcu_torture_cleanup, 779 .readlock = srcu_torture_read_lock, 780 .read_delay = srcu_read_delay, 781 .readunlock = srcu_torture_read_unlock, 782 .readlock_held = torture_srcu_read_lock_held, 783 .get_gp_seq = srcu_torture_completed, 784 .deferred_free = srcu_torture_deferred_free, 785 .sync = srcu_torture_synchronize, 786 .exp_sync = srcu_torture_synchronize_expedited, 787 .same_gp_state = same_state_synchronize_srcu, 788 .get_comp_state = get_completed_synchronize_srcu, 789 .get_gp_state = srcu_torture_get_gp_state, 790 .start_gp_poll = srcu_torture_start_gp_poll, 791 .poll_gp_state = srcu_torture_poll_gp_state, 792 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE, 793 .call = srcu_torture_call, 794 .cb_barrier = srcu_torture_barrier, 795 .stats = srcu_torture_stats, 796 .get_gp_data = srcu_get_gp_data, 797 .cbflood_max = 50000, 798 .irq_capable = 1, 799 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 800 .debug_objects = 1, 801 .name = "srcud" 802 }; 803 804 /* As above, but broken due to inappropriate reader extension. */ 805 static struct rcu_torture_ops busted_srcud_ops = { 806 .ttype = SRCU_FLAVOR, 807 .init = srcu_torture_init, 808 .cleanup = srcu_torture_cleanup, 809 .readlock = srcu_torture_read_lock, 810 .read_delay = rcu_read_delay, 811 .readunlock = srcu_torture_read_unlock, 812 .readlock_held = torture_srcu_read_lock_held, 813 .get_gp_seq = srcu_torture_completed, 814 .deferred_free = srcu_torture_deferred_free, 815 .sync = srcu_torture_synchronize, 816 .exp_sync = srcu_torture_synchronize_expedited, 817 .call = srcu_torture_call, 818 .cb_barrier = srcu_torture_barrier, 819 .stats = srcu_torture_stats, 820 .irq_capable = 1, 821 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 822 .extendables = RCUTORTURE_MAX_EXTEND, 823 .name = "busted_srcud" 824 }; 825 826 /* 827 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 828 * This implementation does not necessarily work well with CPU hotplug. 
829 */ 830 831 static void synchronize_rcu_trivial(void) 832 { 833 int cpu; 834 835 for_each_online_cpu(cpu) { 836 torture_sched_setaffinity(current->pid, cpumask_of(cpu)); 837 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 838 } 839 } 840 841 static int rcu_torture_read_lock_trivial(void) 842 { 843 preempt_disable(); 844 return 0; 845 } 846 847 static void rcu_torture_read_unlock_trivial(int idx) 848 { 849 preempt_enable(); 850 } 851 852 static struct rcu_torture_ops trivial_ops = { 853 .ttype = RCU_TRIVIAL_FLAVOR, 854 .init = rcu_sync_torture_init, 855 .readlock = rcu_torture_read_lock_trivial, 856 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 857 .readunlock = rcu_torture_read_unlock_trivial, 858 .readlock_held = torture_readlock_not_held, 859 .get_gp_seq = rcu_no_completed, 860 .sync = synchronize_rcu_trivial, 861 .exp_sync = synchronize_rcu_trivial, 862 .irq_capable = 1, 863 .name = "trivial" 864 }; 865 866 #ifdef CONFIG_TASKS_RCU 867 868 /* 869 * Definitions for RCU-tasks torture testing. 870 */ 871 872 static int tasks_torture_read_lock(void) 873 { 874 return 0; 875 } 876 877 static void tasks_torture_read_unlock(int idx) 878 { 879 } 880 881 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 882 { 883 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 884 } 885 886 static void synchronize_rcu_mult_test(void) 887 { 888 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry); 889 } 890 891 static struct rcu_torture_ops tasks_ops = { 892 .ttype = RCU_TASKS_FLAVOR, 893 .init = rcu_sync_torture_init, 894 .readlock = tasks_torture_read_lock, 895 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 896 .readunlock = tasks_torture_read_unlock, 897 .get_gp_seq = rcu_no_completed, 898 .deferred_free = rcu_tasks_torture_deferred_free, 899 .sync = synchronize_rcu_tasks, 900 .exp_sync = synchronize_rcu_mult_test, 901 .call = call_rcu_tasks, 902 .cb_barrier = rcu_barrier_tasks, 903 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 904 .get_gp_data = rcu_tasks_get_gp_data, 905 .irq_capable = 1, 906 .slow_gps = 1, 907 .name = "tasks" 908 }; 909 910 #define TASKS_OPS &tasks_ops, 911 912 #else // #ifdef CONFIG_TASKS_RCU 913 914 #define TASKS_OPS 915 916 #endif // #else #ifdef CONFIG_TASKS_RCU 917 918 919 #ifdef CONFIG_TASKS_RUDE_RCU 920 921 /* 922 * Definitions for rude RCU-tasks torture testing. 923 */ 924 925 static struct rcu_torture_ops tasks_rude_ops = { 926 .ttype = RCU_TASKS_RUDE_FLAVOR, 927 .init = rcu_sync_torture_init, 928 .readlock = rcu_torture_read_lock_trivial, 929 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 930 .readunlock = rcu_torture_read_unlock_trivial, 931 .get_gp_seq = rcu_no_completed, 932 .sync = synchronize_rcu_tasks_rude, 933 .exp_sync = synchronize_rcu_tasks_rude, 934 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 935 .get_gp_data = rcu_tasks_rude_get_gp_data, 936 .cbflood_max = 50000, 937 .irq_capable = 1, 938 .name = "tasks-rude" 939 }; 940 941 #define TASKS_RUDE_OPS &tasks_rude_ops, 942 943 #else // #ifdef CONFIG_TASKS_RUDE_RCU 944 945 #define TASKS_RUDE_OPS 946 947 #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU 948 949 950 #ifdef CONFIG_TASKS_TRACE_RCU 951 952 /* 953 * Definitions for tracing RCU-tasks torture testing. 
954 */ 955 956 static int tasks_tracing_torture_read_lock(void) 957 { 958 rcu_read_lock_trace(); 959 return 0; 960 } 961 962 static void tasks_tracing_torture_read_unlock(int idx) 963 { 964 rcu_read_unlock_trace(); 965 } 966 967 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 968 { 969 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 970 } 971 972 static struct rcu_torture_ops tasks_tracing_ops = { 973 .ttype = RCU_TASKS_TRACING_FLAVOR, 974 .init = rcu_sync_torture_init, 975 .readlock = tasks_tracing_torture_read_lock, 976 .read_delay = srcu_read_delay, /* just reuse srcu's version. */ 977 .readunlock = tasks_tracing_torture_read_unlock, 978 .readlock_held = rcu_read_lock_trace_held, 979 .get_gp_seq = rcu_no_completed, 980 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 981 .sync = synchronize_rcu_tasks_trace, 982 .exp_sync = synchronize_rcu_tasks_trace, 983 .call = call_rcu_tasks_trace, 984 .cb_barrier = rcu_barrier_tasks_trace, 985 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, 986 .get_gp_data = rcu_tasks_trace_get_gp_data, 987 .cbflood_max = 50000, 988 .irq_capable = 1, 989 .slow_gps = 1, 990 .name = "tasks-tracing" 991 }; 992 993 #define TASKS_TRACING_OPS &tasks_tracing_ops, 994 995 #else // #ifdef CONFIG_TASKS_TRACE_RCU 996 997 #define TASKS_TRACING_OPS 998 999 #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU 1000 1001 1002 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 1003 { 1004 if (!cur_ops->gp_diff) 1005 return new - old; 1006 return cur_ops->gp_diff(new, old); 1007 } 1008 1009 /* 1010 * RCU torture priority-boost testing. Runs one real-time thread per 1011 * CPU for moderate bursts, repeatedly starting grace periods and waiting 1012 * for them to complete. If a given grace period takes too long, we assume 1013 * that priority inversion has occurred. 1014 */ 1015 1016 static int old_rt_runtime = -1; 1017 1018 static void rcu_torture_disable_rt_throttle(void) 1019 { 1020 /* 1021 * Disable RT throttling so that rcutorture's boost threads don't get 1022 * throttled. Only possible if rcutorture is built-in otherwise the 1023 * user should manually do this by setting the sched_rt_period_us and 1024 * sched_rt_runtime sysctls. 1025 */ 1026 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 1027 return; 1028 1029 old_rt_runtime = sysctl_sched_rt_runtime; 1030 sysctl_sched_rt_runtime = -1; 1031 } 1032 1033 static void rcu_torture_enable_rt_throttle(void) 1034 { 1035 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 1036 return; 1037 1038 sysctl_sched_rt_runtime = old_rt_runtime; 1039 old_rt_runtime = -1; 1040 } 1041 1042 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) 1043 { 1044 int cpu; 1045 static int dbg_done; 1046 unsigned long end = jiffies; 1047 bool gp_done; 1048 unsigned long j; 1049 static unsigned long last_persist; 1050 unsigned long lp; 1051 unsigned long mininterval = test_boost_duration * HZ - HZ / 2; 1052 1053 if (end - *start > mininterval) { 1054 // Recheck after checking time to avoid false positives. 1055 smp_mb(); // Time check before grace-period check. 1056 if (cur_ops->poll_gp_state(gp_state)) 1057 return false; // passed, though perhaps just barely 1058 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { 1059 // At most one persisted message per boost test. 
1060 j = jiffies; 1061 lp = READ_ONCE(last_persist); 1062 if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) 1063 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); 1064 return false; // passed on a technicality 1065 } 1066 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 1067 n_rcu_torture_boost_failure++; 1068 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { 1069 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", 1070 current->rt_priority, gp_state, end - *start); 1071 cur_ops->gp_kthread_dbg(); 1072 // Recheck after print to flag grace period ending during splat. 1073 gp_done = cur_ops->poll_gp_state(gp_state); 1074 pr_info("Boost inversion: GP %lu %s.\n", gp_state, 1075 gp_done ? "ended already" : "still pending"); 1076 1077 } 1078 1079 return true; // failed 1080 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { 1081 *start = jiffies; 1082 } 1083 1084 return false; // passed 1085 } 1086 1087 static int rcu_torture_boost(void *arg) 1088 { 1089 unsigned long endtime; 1090 unsigned long gp_state; 1091 unsigned long gp_state_time; 1092 unsigned long oldstarttime; 1093 1094 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 1095 1096 /* Set real-time priority. */ 1097 sched_set_fifo_low(current); 1098 1099 /* Each pass through the following loop does one boost-test cycle. */ 1100 do { 1101 bool failed = false; // Test failed already in this test interval 1102 bool gp_initiated = false; 1103 1104 if (kthread_should_stop()) 1105 goto checkwait; 1106 1107 /* Wait for the next test interval. */ 1108 oldstarttime = READ_ONCE(boost_starttime); 1109 while (time_before(jiffies, oldstarttime)) { 1110 schedule_timeout_interruptible(oldstarttime - jiffies); 1111 if (stutter_wait("rcu_torture_boost")) 1112 sched_set_fifo_low(current); 1113 if (torture_must_stop()) 1114 goto checkwait; 1115 } 1116 1117 // Do one boost-test interval. 1118 endtime = oldstarttime + test_boost_duration * HZ; 1119 while (time_before(jiffies, endtime)) { 1120 // Has current GP gone too long? 1121 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1122 failed = rcu_torture_boost_failed(gp_state, &gp_state_time); 1123 // If we don't have a grace period in flight, start one. 1124 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { 1125 gp_state = cur_ops->start_gp_poll(); 1126 gp_initiated = true; 1127 gp_state_time = jiffies; 1128 } 1129 if (stutter_wait("rcu_torture_boost")) { 1130 sched_set_fifo_low(current); 1131 // If the grace period already ended, 1132 // we don't know when that happened, so 1133 // start over. 1134 if (cur_ops->poll_gp_state(gp_state)) 1135 gp_initiated = false; 1136 } 1137 if (torture_must_stop()) 1138 goto checkwait; 1139 } 1140 1141 // In case the grace period extended beyond the end of the loop. 1142 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1143 rcu_torture_boost_failed(gp_state, &gp_state_time); 1144 1145 /* 1146 * Set the start time of the next test interval. 1147 * Yes, this is vulnerable to long delays, but such 1148 * delays simply cause a false negative for the next 1149 * interval. Besides, we are running at RT priority, 1150 * so delays should be relatively rare. 
1151 */ 1152 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { 1153 if (mutex_trylock(&boost_mutex)) { 1154 if (oldstarttime == boost_starttime) { 1155 WRITE_ONCE(boost_starttime, 1156 jiffies + test_boost_interval * HZ); 1157 n_rcu_torture_boosts++; 1158 } 1159 mutex_unlock(&boost_mutex); 1160 break; 1161 } 1162 schedule_timeout_uninterruptible(HZ / 20); 1163 } 1164 1165 /* Go do the stutter. */ 1166 checkwait: if (stutter_wait("rcu_torture_boost")) 1167 sched_set_fifo_low(current); 1168 } while (!torture_must_stop()); 1169 1170 /* Clean up and exit. */ 1171 while (!kthread_should_stop()) { 1172 torture_shutdown_absorb("rcu_torture_boost"); 1173 schedule_timeout_uninterruptible(HZ / 20); 1174 } 1175 torture_kthread_stopping("rcu_torture_boost"); 1176 return 0; 1177 } 1178 1179 /* 1180 * RCU torture force-quiescent-state kthread. Repeatedly induces 1181 * bursts of calls to force_quiescent_state(), increasing the probability 1182 * of occurrence of some important types of race conditions. 1183 */ 1184 static int 1185 rcu_torture_fqs(void *arg) 1186 { 1187 unsigned long fqs_resume_time; 1188 int fqs_burst_remaining; 1189 int oldnice = task_nice(current); 1190 1191 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1192 do { 1193 fqs_resume_time = jiffies + fqs_stutter * HZ; 1194 while (time_before(jiffies, fqs_resume_time) && 1195 !kthread_should_stop()) { 1196 schedule_timeout_interruptible(HZ / 20); 1197 } 1198 fqs_burst_remaining = fqs_duration; 1199 while (fqs_burst_remaining > 0 && 1200 !kthread_should_stop()) { 1201 cur_ops->fqs(); 1202 udelay(fqs_holdoff); 1203 fqs_burst_remaining -= fqs_holdoff; 1204 } 1205 if (stutter_wait("rcu_torture_fqs")) 1206 sched_set_normal(current, oldnice); 1207 } while (!torture_must_stop()); 1208 torture_kthread_stopping("rcu_torture_fqs"); 1209 return 0; 1210 } 1211 1212 // Used by writers to randomly choose from the available grace-period primitives. 1213 static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; 1214 static int nsynctypes; 1215 1216 /* 1217 * Determine which grace-period primitives are available. 1218 */ 1219 static void rcu_torture_write_types(void) 1220 { 1221 bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full; 1222 bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp; 1223 bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll; 1224 bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync; 1225 1226 /* Initialize synctype[] array. If none set, take default. 
*/ 1227 if (!gp_cond1 && 1228 !gp_cond_exp1 && 1229 !gp_cond_full1 && 1230 !gp_cond_exp_full1 && 1231 !gp_exp1 && 1232 !gp_poll_exp1 && 1233 !gp_poll_exp_full1 && 1234 !gp_normal1 && 1235 !gp_poll1 && 1236 !gp_poll_full1 && 1237 !gp_sync1) { 1238 gp_cond1 = true; 1239 gp_cond_exp1 = true; 1240 gp_cond_full1 = true; 1241 gp_cond_exp_full1 = true; 1242 gp_exp1 = true; 1243 gp_poll_exp1 = true; 1244 gp_poll_exp_full1 = true; 1245 gp_normal1 = true; 1246 gp_poll1 = true; 1247 gp_poll_full1 = true; 1248 gp_sync1 = true; 1249 } 1250 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { 1251 synctype[nsynctypes++] = RTWS_COND_GET; 1252 pr_info("%s: Testing conditional GPs.\n", __func__); 1253 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { 1254 pr_alert("%s: gp_cond without primitives.\n", __func__); 1255 } 1256 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { 1257 synctype[nsynctypes++] = RTWS_COND_GET_EXP; 1258 pr_info("%s: Testing conditional expedited GPs.\n", __func__); 1259 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { 1260 pr_alert("%s: gp_cond_exp without primitives.\n", __func__); 1261 } 1262 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) { 1263 synctype[nsynctypes++] = RTWS_COND_GET_FULL; 1264 pr_info("%s: Testing conditional full-state GPs.\n", __func__); 1265 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) { 1266 pr_alert("%s: gp_cond_full without primitives.\n", __func__); 1267 } 1268 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) { 1269 synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL; 1270 pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__); 1271 } else if (gp_cond_exp_full && 1272 (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) { 1273 pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__); 1274 } 1275 if (gp_exp1 && cur_ops->exp_sync) { 1276 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1277 pr_info("%s: Testing expedited GPs.\n", __func__); 1278 } else if (gp_exp && !cur_ops->exp_sync) { 1279 pr_alert("%s: gp_exp without primitives.\n", __func__); 1280 } 1281 if (gp_normal1 && cur_ops->deferred_free) { 1282 synctype[nsynctypes++] = RTWS_DEF_FREE; 1283 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1284 } else if (gp_normal && !cur_ops->deferred_free) { 1285 pr_alert("%s: gp_normal without primitives.\n", __func__); 1286 } 1287 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state && 1288 cur_ops->start_gp_poll && cur_ops->poll_gp_state) { 1289 synctype[nsynctypes++] = RTWS_POLL_GET; 1290 pr_info("%s: Testing polling GPs.\n", __func__); 1291 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { 1292 pr_alert("%s: gp_poll without primitives.\n", __func__); 1293 } 1294 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full 1295 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) { 1296 synctype[nsynctypes++] = RTWS_POLL_GET_FULL; 1297 pr_info("%s: Testing polling full-state GPs.\n", __func__); 1298 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) { 1299 pr_alert("%s: gp_poll_full without primitives.\n", __func__); 1300 } 1301 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { 1302 synctype[nsynctypes++] = RTWS_POLL_GET_EXP; 1303 pr_info("%s: Testing polling expedited GPs.\n", __func__); 1304 } else if 
(gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { 1305 pr_alert("%s: gp_poll_exp without primitives.\n", __func__); 1306 } 1307 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) { 1308 synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL; 1309 pr_info("%s: Testing polling full-state expedited GPs.\n", __func__); 1310 } else if (gp_poll_exp_full && 1311 (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) { 1312 pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__); 1313 } 1314 if (gp_sync1 && cur_ops->sync) { 1315 synctype[nsynctypes++] = RTWS_SYNC; 1316 pr_info("%s: Testing normal GPs.\n", __func__); 1317 } else if (gp_sync && !cur_ops->sync) { 1318 pr_alert("%s: gp_sync without primitives.\n", __func__); 1319 } 1320 pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes); 1321 } 1322 1323 /* 1324 * Do the specified rcu_torture_writer() synchronous grace period, 1325 * while also testing out the polled APIs. Note well that the single-CPU 1326 * grace-period optimizations must be accounted for. 1327 */ 1328 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) 1329 { 1330 unsigned long cookie; 1331 struct rcu_gp_oldstate cookie_full; 1332 bool dopoll; 1333 bool dopoll_full; 1334 unsigned long r = torture_random(trsp); 1335 1336 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); 1337 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); 1338 if (dopoll || dopoll_full) 1339 cpus_read_lock(); 1340 if (dopoll) 1341 cookie = cur_ops->get_gp_state(); 1342 if (dopoll_full) 1343 cur_ops->get_gp_state_full(&cookie_full); 1344 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) 1345 sync(); 1346 sync(); 1347 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), 1348 "%s: Cookie check 3 failed %pS() online %*pbl.", 1349 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1350 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), 1351 "%s: Cookie check 4 failed %pS() online %*pbl", 1352 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1353 if (dopoll || dopoll_full) 1354 cpus_read_unlock(); 1355 } 1356 1357 /* 1358 * RCU torture writer kthread. Repeatedly substitutes a new structure 1359 * for that pointed to by rcu_torture_current, freeing the old structure 1360 * after a series of grace periods (the "pipeline"). 1361 */ 1362 static int 1363 rcu_torture_writer(void *arg) 1364 { 1365 bool boot_ended; 1366 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1367 unsigned long cookie; 1368 struct rcu_gp_oldstate cookie_full; 1369 int expediting = 0; 1370 unsigned long gp_snap; 1371 unsigned long gp_snap1; 1372 struct rcu_gp_oldstate gp_snap_full; 1373 struct rcu_gp_oldstate gp_snap1_full; 1374 int i; 1375 int idx; 1376 int oldnice = task_nice(current); 1377 struct rcu_gp_oldstate *rgo = NULL; 1378 int rgo_size = 0; 1379 struct rcu_torture *rp; 1380 struct rcu_torture *old_rp; 1381 static DEFINE_TORTURE_RANDOM(rand); 1382 unsigned long stallsdone = jiffies; 1383 bool stutter_waited; 1384 unsigned long *ulo = NULL; 1385 int ulo_size = 0; 1386 1387 // If a new stall test is added, this must be adjusted. 
1388 if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu) 1389 stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * 1390 HZ * (stall_cpu_repeat + 1); 1391 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1392 if (!can_expedite) 1393 pr_alert("%s" TORTURE_FLAG 1394 " GP expediting controlled from boot/sysfs for %s.\n", 1395 torture_type, cur_ops->name); 1396 if (WARN_ONCE(nsynctypes == 0, 1397 "%s: No update-side primitives.\n", __func__)) { 1398 /* 1399 * No updates primitives, so don't try updating. 1400 * The resulting test won't be testing much, hence the 1401 * above WARN_ONCE(). 1402 */ 1403 rcu_torture_writer_state = RTWS_STOPPING; 1404 torture_kthread_stopping("rcu_torture_writer"); 1405 return 0; 1406 } 1407 if (cur_ops->poll_active > 0) { 1408 ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL); 1409 if (!WARN_ON(!ulo)) 1410 ulo_size = cur_ops->poll_active; 1411 } 1412 if (cur_ops->poll_active_full > 0) { 1413 rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL); 1414 if (!WARN_ON(!rgo)) 1415 rgo_size = cur_ops->poll_active_full; 1416 } 1417 1418 do { 1419 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1420 torture_hrtimeout_us(500, 1000, &rand); 1421 rp = rcu_torture_alloc(); 1422 if (rp == NULL) 1423 continue; 1424 rp->rtort_pipe_count = 0; 1425 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); 1426 rcu_torture_writer_state = RTWS_DELAY; 1427 udelay(torture_random(&rand) & 0x3ff); 1428 rcu_torture_writer_state = RTWS_REPLACE; 1429 old_rp = rcu_dereference_check(rcu_torture_current, 1430 current == writer_task); 1431 rp->rtort_mbtest = 1; 1432 rcu_assign_pointer(rcu_torture_current, rp); 1433 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1434 if (old_rp) { 1435 i = old_rp->rtort_pipe_count; 1436 if (i > RCU_TORTURE_PIPE_LEN) 1437 i = RCU_TORTURE_PIPE_LEN; 1438 atomic_inc(&rcu_torture_wcount[i]); 1439 WRITE_ONCE(old_rp->rtort_pipe_count, 1440 old_rp->rtort_pipe_count + 1); 1441 ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count); 1442 1443 // Make sure readers block polled grace periods. 
1444 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1445 idx = cur_ops->readlock(); 1446 cookie = cur_ops->get_gp_state(); 1447 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1448 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1449 __func__, 1450 rcu_torture_writer_state_getname(), 1451 rcu_torture_writer_state, 1452 cookie, cur_ops->get_gp_state()); 1453 if (cur_ops->get_comp_state) { 1454 cookie = cur_ops->get_comp_state(); 1455 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1456 } 1457 cur_ops->readunlock(idx); 1458 } 1459 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1460 idx = cur_ops->readlock(); 1461 cur_ops->get_gp_state_full(&cookie_full); 1462 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1463 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1464 __func__, 1465 rcu_torture_writer_state_getname(), 1466 rcu_torture_writer_state, 1467 cpumask_pr_args(cpu_online_mask)); 1468 if (cur_ops->get_comp_state_full) { 1469 cur_ops->get_comp_state_full(&cookie_full); 1470 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1471 } 1472 cur_ops->readunlock(idx); 1473 } 1474 switch (synctype[torture_random(&rand) % nsynctypes]) { 1475 case RTWS_DEF_FREE: 1476 rcu_torture_writer_state = RTWS_DEF_FREE; 1477 cur_ops->deferred_free(old_rp); 1478 break; 1479 case RTWS_EXP_SYNC: 1480 rcu_torture_writer_state = RTWS_EXP_SYNC; 1481 do_rtws_sync(&rand, cur_ops->exp_sync); 1482 rcu_torture_pipe_update(old_rp); 1483 break; 1484 case RTWS_COND_GET: 1485 rcu_torture_writer_state = RTWS_COND_GET; 1486 gp_snap = cur_ops->get_gp_state(); 1487 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1488 rcu_torture_writer_state = RTWS_COND_SYNC; 1489 cur_ops->cond_sync(gp_snap); 1490 rcu_torture_pipe_update(old_rp); 1491 break; 1492 case RTWS_COND_GET_EXP: 1493 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1494 gp_snap = cur_ops->get_gp_state_exp(); 1495 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1496 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1497 cur_ops->cond_sync_exp(gp_snap); 1498 rcu_torture_pipe_update(old_rp); 1499 break; 1500 case RTWS_COND_GET_FULL: 1501 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1502 cur_ops->get_gp_state_full(&gp_snap_full); 1503 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1504 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1505 cur_ops->cond_sync_full(&gp_snap_full); 1506 rcu_torture_pipe_update(old_rp); 1507 break; 1508 case RTWS_COND_GET_EXP_FULL: 1509 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1510 cur_ops->get_gp_state_full(&gp_snap_full); 1511 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1512 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1513 cur_ops->cond_sync_exp_full(&gp_snap_full); 1514 rcu_torture_pipe_update(old_rp); 1515 break; 1516 case RTWS_POLL_GET: 1517 rcu_torture_writer_state = RTWS_POLL_GET; 1518 for (i = 0; i < ulo_size; i++) 1519 ulo[i] = cur_ops->get_comp_state(); 1520 gp_snap = cur_ops->start_gp_poll(); 1521 rcu_torture_writer_state = RTWS_POLL_WAIT; 1522 while (!cur_ops->poll_gp_state(gp_snap)) { 1523 gp_snap1 = cur_ops->get_gp_state(); 1524 for (i = 0; i < ulo_size; i++) 1525 if (cur_ops->poll_gp_state(ulo[i]) || 1526 cur_ops->same_gp_state(ulo[i], gp_snap1)) { 1527 ulo[i] = gp_snap1; 1528 break; 1529 } 1530 WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size); 1531 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1532 &rand); 1533 } 1534 rcu_torture_pipe_update(old_rp); 1535 break; 1536 case RTWS_POLL_GET_FULL: 1537 rcu_torture_writer_state = 
RTWS_POLL_GET_FULL; 1538 for (i = 0; i < rgo_size; i++) 1539 cur_ops->get_comp_state_full(&rgo[i]); 1540 cur_ops->start_gp_poll_full(&gp_snap_full); 1541 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1542 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1543 cur_ops->get_gp_state_full(&gp_snap1_full); 1544 for (i = 0; i < rgo_size; i++) 1545 if (cur_ops->poll_gp_state_full(&rgo[i]) || 1546 cur_ops->same_gp_state_full(&rgo[i], 1547 &gp_snap1_full)) { 1548 rgo[i] = gp_snap1_full; 1549 break; 1550 } 1551 WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size); 1552 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1553 &rand); 1554 } 1555 rcu_torture_pipe_update(old_rp); 1556 break; 1557 case RTWS_POLL_GET_EXP: 1558 rcu_torture_writer_state = RTWS_POLL_GET_EXP; 1559 gp_snap = cur_ops->start_gp_poll_exp(); 1560 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; 1561 while (!cur_ops->poll_gp_state_exp(gp_snap)) 1562 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1563 &rand); 1564 rcu_torture_pipe_update(old_rp); 1565 break; 1566 case RTWS_POLL_GET_EXP_FULL: 1567 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; 1568 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1569 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; 1570 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1571 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1572 &rand); 1573 rcu_torture_pipe_update(old_rp); 1574 break; 1575 case RTWS_SYNC: 1576 rcu_torture_writer_state = RTWS_SYNC; 1577 do_rtws_sync(&rand, cur_ops->sync); 1578 rcu_torture_pipe_update(old_rp); 1579 break; 1580 default: 1581 WARN_ON_ONCE(1); 1582 break; 1583 } 1584 } 1585 WRITE_ONCE(rcu_torture_current_version, 1586 rcu_torture_current_version + 1); 1587 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1588 if (can_expedite && 1589 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1590 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1591 if (expediting >= 0) 1592 rcu_expedite_gp(); 1593 else 1594 rcu_unexpedite_gp(); 1595 if (++expediting > 3) 1596 expediting = -expediting; 1597 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1598 can_expedite = !rcu_gp_is_expedited() && 1599 !rcu_gp_is_normal(); 1600 } 1601 rcu_torture_writer_state = RTWS_STUTTER; 1602 boot_ended = rcu_inkernel_boot_has_ended(); 1603 stutter_waited = stutter_wait("rcu_torture_writer"); 1604 if (stutter_waited && 1605 !atomic_read(&rcu_fwd_cb_nodelay) && 1606 !cur_ops->slow_gps && 1607 !torture_must_stop() && 1608 boot_ended && 1609 time_after(jiffies, stallsdone)) 1610 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1611 if (list_empty(&rcu_tortures[i].rtort_free) && 1612 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { 1613 tracing_off(); 1614 if (cur_ops->gp_kthread_dbg) 1615 cur_ops->gp_kthread_dbg(); 1616 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1617 rcu_ftrace_dump(DUMP_ALL); 1618 } 1619 if (stutter_waited) 1620 sched_set_normal(current, oldnice); 1621 } while (!torture_must_stop()); 1622 rcu_torture_current = NULL; // Let stats task know that we are done. 1623 /* Reset expediting back to unexpedited. 
*/ 1624 if (expediting > 0) 1625 expediting = -expediting; 1626 while (can_expedite && expediting++ < 0) 1627 rcu_unexpedite_gp(); 1628 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1629 if (!can_expedite) 1630 pr_alert("%s" TORTURE_FLAG 1631 " Dynamic grace-period expediting was disabled.\n", 1632 torture_type); 1633 kfree(ulo); 1634 kfree(rgo); 1635 rcu_torture_writer_state = RTWS_STOPPING; 1636 torture_kthread_stopping("rcu_torture_writer"); 1637 return 0; 1638 } 1639 1640 /* 1641 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1642 * delay between calls. 1643 */ 1644 static int 1645 rcu_torture_fakewriter(void *arg) 1646 { 1647 unsigned long gp_snap; 1648 struct rcu_gp_oldstate gp_snap_full; 1649 DEFINE_TORTURE_RANDOM(rand); 1650 1651 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1652 set_user_nice(current, MAX_NICE); 1653 1654 if (WARN_ONCE(nsynctypes == 0, 1655 "%s: No update-side primitives.\n", __func__)) { 1656 /* 1657 * No updates primitives, so don't try updating. 1658 * The resulting test won't be testing much, hence the 1659 * above WARN_ONCE(). 1660 */ 1661 torture_kthread_stopping("rcu_torture_fakewriter"); 1662 return 0; 1663 } 1664 1665 do { 1666 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1667 if (cur_ops->cb_barrier != NULL && 1668 torture_random(&rand) % (nfakewriters * 8) == 0) { 1669 cur_ops->cb_barrier(); 1670 } else { 1671 switch (synctype[torture_random(&rand) % nsynctypes]) { 1672 case RTWS_DEF_FREE: 1673 break; 1674 case RTWS_EXP_SYNC: 1675 cur_ops->exp_sync(); 1676 break; 1677 case RTWS_COND_GET: 1678 gp_snap = cur_ops->get_gp_state(); 1679 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1680 cur_ops->cond_sync(gp_snap); 1681 break; 1682 case RTWS_COND_GET_EXP: 1683 gp_snap = cur_ops->get_gp_state_exp(); 1684 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1685 cur_ops->cond_sync_exp(gp_snap); 1686 break; 1687 case RTWS_COND_GET_FULL: 1688 cur_ops->get_gp_state_full(&gp_snap_full); 1689 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1690 cur_ops->cond_sync_full(&gp_snap_full); 1691 break; 1692 case RTWS_COND_GET_EXP_FULL: 1693 cur_ops->get_gp_state_full(&gp_snap_full); 1694 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1695 cur_ops->cond_sync_exp_full(&gp_snap_full); 1696 break; 1697 case RTWS_POLL_GET: 1698 gp_snap = cur_ops->start_gp_poll(); 1699 while (!cur_ops->poll_gp_state(gp_snap)) { 1700 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1701 &rand); 1702 } 1703 break; 1704 case RTWS_POLL_GET_FULL: 1705 cur_ops->start_gp_poll_full(&gp_snap_full); 1706 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1707 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1708 &rand); 1709 } 1710 break; 1711 case RTWS_POLL_GET_EXP: 1712 gp_snap = cur_ops->start_gp_poll_exp(); 1713 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1714 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1715 &rand); 1716 } 1717 break; 1718 case RTWS_POLL_GET_EXP_FULL: 1719 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1720 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1721 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1722 &rand); 1723 } 1724 break; 1725 case RTWS_SYNC: 1726 cur_ops->sync(); 1727 break; 1728 default: 1729 WARN_ON_ONCE(1); 1730 break; 1731 } 1732 } 1733 stutter_wait("rcu_torture_fakewriter"); 1734 } while (!torture_must_stop()); 1735 1736 torture_kthread_stopping("rcu_torture_fakewriter"); 1737 return 0; 1738 } 1739 1740 
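/*
 * Editorial sketch (illustration only, kept inside a comment so nothing
 * new is compiled): the RTWS_COND_GET and RTWS_POLL_GET cases above
 * exercise the snapshot-based grace-period primitives.  For vanilla RCU
 * an updater would use them roughly as follows, where remove_element()
 * and p are placeholders rather than rcutorture symbols:
 *
 *	unsigned long snap;
 *
 *	snap = get_state_synchronize_rcu();	// Snapshot GP state.
 *	remove_element(p);			// Hide from new readers.
 *	cond_synchronize_rcu(snap);		// Block only if GP not yet done.
 *	kfree(p);
 *
 * or, polling rather than blocking:
 *
 *	snap = start_poll_synchronize_rcu();	// Also kicks off a GP.
 *	while (!poll_state_synchronize_rcu(snap))
 *		schedule_timeout_uninterruptible(1);
 */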
static void rcu_torture_timer_cb(struct rcu_head *rhp) 1741 { 1742 kfree(rhp); 1743 } 1744 1745 // Set up and carry out testing of RCU's global memory ordering 1746 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1747 struct torture_random_state *trsp) 1748 { 1749 unsigned long loops; 1750 int noc = torture_num_online_cpus(); 1751 int rdrchked; 1752 int rdrchker; 1753 struct rcu_torture_reader_check *rtrcp; // Me. 1754 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1755 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1756 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1757 1758 if (myid < 0) 1759 return; // Don't try this from timer handlers. 1760 1761 // Increment my counter. 1762 rtrcp = &rcu_torture_reader_mbchk[myid]; 1763 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1764 1765 // Attempt to assign someone else some checking work. 1766 rdrchked = torture_random(trsp) % nrealreaders; 1767 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1768 rdrchker = torture_random(trsp) % nrealreaders; 1769 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1770 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1771 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1772 !READ_ONCE(rtp->rtort_chkp) && 1773 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1774 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1775 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1776 rtrcp->rtc_chkrdr = rdrchked; 1777 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1778 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1779 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1780 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1781 } 1782 1783 // If assigned some completed work, do it! 1784 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1785 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1786 return; // No work or work not yet ready. 1787 rdrchked = rtrcp_assigner->rtc_chkrdr; 1788 if (WARN_ON_ONCE(rdrchked < 0)) 1789 return; 1790 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1791 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1792 atomic_inc(&n_rcu_torture_mbchk_tries); 1793 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1794 atomic_inc(&n_rcu_torture_mbchk_fail); 1795 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1796 rtrcp_assigner->rtc_ready = 0; 1797 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1798 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1799 } 1800 1801 /* 1802 * Do one extension of an RCU read-side critical section using the 1803 * current reader state in readstate (set to zero for initial entry 1804 * to extended critical section), set the new state as specified by 1805 * newstate (set to zero for final exit from extended critical section), 1806 * and random-number-generator state in trsp. If this is neither the 1807 * beginning or end of the critical section and if there was actually a 1808 * change, do a ->read_delay(). 
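 *
 * Editorial note: readstate and newstate are bitmasks built from the
 * RCUTORTURE_RDR_* flags, with the index returned by ->readlock()
 * stashed at RCUTORTURE_RDR_SHIFT_1/_2.  For example,
 * RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_BH describes a segment that
 * holds the flavor's read lock while also having run local_bh_disable().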
1809 */ 1810 static void rcutorture_one_extend(int *readstate, int newstate, 1811 struct torture_random_state *trsp, 1812 struct rt_read_seg *rtrsp) 1813 { 1814 unsigned long flags; 1815 int idxnew1 = -1; 1816 int idxnew2 = -1; 1817 int idxold1 = *readstate; 1818 int idxold2 = idxold1; 1819 int statesnew = ~*readstate & newstate; 1820 int statesold = *readstate & ~newstate; 1821 1822 WARN_ON_ONCE(idxold2 < 0); 1823 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1824 rtrsp->rt_readstate = newstate; 1825 1826 /* First, put new protection in place to avoid critical-section gap. */ 1827 if (statesnew & RCUTORTURE_RDR_BH) 1828 local_bh_disable(); 1829 if (statesnew & RCUTORTURE_RDR_RBH) 1830 rcu_read_lock_bh(); 1831 if (statesnew & RCUTORTURE_RDR_IRQ) 1832 local_irq_disable(); 1833 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1834 preempt_disable(); 1835 if (statesnew & RCUTORTURE_RDR_SCHED) 1836 rcu_read_lock_sched(); 1837 if (statesnew & RCUTORTURE_RDR_RCU_1) 1838 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; 1839 if (statesnew & RCUTORTURE_RDR_RCU_2) 1840 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; 1841 1842 /* 1843 * Next, remove old protection, in decreasing order of strength 1844 * to avoid unlock paths that aren't safe in the stronger 1845 * context. Namely: BH can not be enabled with disabled interrupts. 1846 * Additionally PREEMPT_RT requires that BH is enabled in preemptible 1847 * context. 1848 */ 1849 if (statesold & RCUTORTURE_RDR_IRQ) 1850 local_irq_enable(); 1851 if (statesold & RCUTORTURE_RDR_PREEMPT) 1852 preempt_enable(); 1853 if (statesold & RCUTORTURE_RDR_SCHED) 1854 rcu_read_unlock_sched(); 1855 if (statesold & RCUTORTURE_RDR_BH) 1856 local_bh_enable(); 1857 if (statesold & RCUTORTURE_RDR_RBH) 1858 rcu_read_unlock_bh(); 1859 if (statesold & RCUTORTURE_RDR_RCU_2) { 1860 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); 1861 WARN_ON_ONCE(idxnew2 != -1); 1862 idxold2 = 0; 1863 } 1864 if (statesold & RCUTORTURE_RDR_RCU_1) { 1865 bool lockit; 1866 1867 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 1868 if (lockit) 1869 raw_spin_lock_irqsave(&current->pi_lock, flags); 1870 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); 1871 WARN_ON_ONCE(idxnew1 != -1); 1872 idxold1 = 0; 1873 if (lockit) 1874 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 1875 } 1876 1877 /* Delay if neither beginning nor end and there was a change. */ 1878 if ((statesnew || statesold) && *readstate && newstate) 1879 cur_ops->read_delay(trsp, rtrsp); 1880 1881 /* Update the reader state. */ 1882 if (idxnew1 == -1) 1883 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 1884 WARN_ON_ONCE(idxnew1 < 0); 1885 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1)) 1886 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1); 1887 if (idxnew2 == -1) 1888 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 1889 WARN_ON_ONCE(idxnew2 < 0); 1890 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1891 *readstate = idxnew1 | idxnew2 | newstate; 1892 WARN_ON_ONCE(*readstate < 0); 1893 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1)) 1894 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2); 1895 } 1896 1897 /* Return the biggest extendables mask given current RCU and boot parameters.
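 *
 * For example (editorial illustration): if the extendables module parameter
 * keeps its RCUTORTURE_MAX_EXTEND default but the flavor under test
 * advertises only RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ in ->extendables,
 * the result is those two bits plus RCUTORTURE_RDR_RCU_1 and
 * RCUTORTURE_RDR_RCU_2, which this function always permits.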
*/ 1898 static int rcutorture_extend_mask_max(void) 1899 { 1900 int mask; 1901 1902 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1903 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1904 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 1905 return mask; 1906 } 1907 1908 /* Return a random protection state mask, but with at least one bit set. */ 1909 static int 1910 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1911 { 1912 int mask = rcutorture_extend_mask_max(); 1913 unsigned long randmask1 = torture_random(trsp); 1914 unsigned long randmask2 = randmask1 >> 3; 1915 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 1916 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 1917 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1918 1919 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); 1920 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1921 if (!(randmask1 & 0x7)) 1922 mask = mask & randmask2; 1923 else 1924 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1925 1926 // Can't have nested RCU reader without outer RCU reader. 1927 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 1928 if (oldmask & RCUTORTURE_RDR_RCU_1) 1929 mask &= ~RCUTORTURE_RDR_RCU_2; 1930 else 1931 mask |= RCUTORTURE_RDR_RCU_1; 1932 } 1933 1934 /* 1935 * Can't enable bh w/irq disabled. 1936 */ 1937 if (mask & RCUTORTURE_RDR_IRQ) 1938 mask |= oldmask & bhs; 1939 1940 /* 1941 * Ideally these sequences would be detected in debug builds 1942 * (regardless of RT), but until then don't stop testing 1943 * them on non-RT. 1944 */ 1945 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1946 /* Can't modify BH in atomic context */ 1947 if (oldmask & preempts_irq) 1948 mask &= ~bhs; 1949 if ((oldmask | mask) & preempts_irq) 1950 mask |= oldmask & bhs; 1951 } 1952 1953 return mask ?: RCUTORTURE_RDR_RCU_1; 1954 } 1955 1956 /* 1957 * Do a randomly selected number of extensions of an existing RCU read-side 1958 * critical section. 1959 */ 1960 static struct rt_read_seg * 1961 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1962 struct rt_read_seg *rtrsp) 1963 { 1964 int i; 1965 int j; 1966 int mask = rcutorture_extend_mask_max(); 1967 1968 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1969 if (!((mask - 1) & mask)) 1970 return rtrsp; /* Current RCU reader not extendable. */ 1971 /* Bias towards larger numbers of loops. */ 1972 i = torture_random(trsp); 1973 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1974 for (j = 0; j < i; j++) { 1975 mask = rcutorture_extend_mask(*readstate, trsp); 1976 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1977 } 1978 return &rtrsp[j]; 1979 } 1980 1981 /* 1982 * Do one read-side critical section, returning false if there was 1983 * no data to read. Can be invoked both from process context and 1984 * from a timer handler. 
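 *
 * Editorial note: roughly one invocation in 4096 (the checkpolling path,
 * gated by torture_random() & 0xfff) also snapshots the polled
 * grace-period cookies on entry and warns on exit if a full grace period
 * appears to have elapsed within this single reader.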
1985 */ 1986 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1987 { 1988 bool checkpolling = !(torture_random(trsp) & 0xfff); 1989 unsigned long cookie; 1990 struct rcu_gp_oldstate cookie_full; 1991 int i; 1992 unsigned long started; 1993 unsigned long completed; 1994 int newstate; 1995 struct rcu_torture *p; 1996 int pipe_count; 1997 int readstate = 0; 1998 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1999 struct rt_read_seg *rtrsp = &rtseg[0]; 2000 struct rt_read_seg *rtrsp1; 2001 unsigned long long ts; 2002 2003 WARN_ON_ONCE(!rcu_is_watching()); 2004 newstate = rcutorture_extend_mask(readstate, trsp); 2005 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 2006 if (checkpolling) { 2007 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2008 cookie = cur_ops->get_gp_state(); 2009 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2010 cur_ops->get_gp_state_full(&cookie_full); 2011 } 2012 started = cur_ops->get_gp_seq(); 2013 ts = rcu_trace_clock_local(); 2014 p = rcu_dereference_check(rcu_torture_current, 2015 !cur_ops->readlock_held || cur_ops->readlock_held()); 2016 if (p == NULL) { 2017 /* Wait for rcu_torture_writer to get underway */ 2018 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 2019 return false; 2020 } 2021 if (p->rtort_mbtest == 0) 2022 atomic_inc(&n_rcu_torture_mberror); 2023 rcu_torture_reader_do_mbchk(myid, p, trsp); 2024 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 2025 preempt_disable(); 2026 pipe_count = READ_ONCE(p->rtort_pipe_count); 2027 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2028 // Should not happen in a correct RCU implementation, 2029 // happens quite often for torture_type=busted. 2030 pipe_count = RCU_TORTURE_PIPE_LEN; 2031 } 2032 completed = cur_ops->get_gp_seq(); 2033 if (pipe_count > 1) { 2034 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 2035 ts, started, completed); 2036 rcu_ftrace_dump(DUMP_ALL); 2037 } 2038 __this_cpu_inc(rcu_torture_count[pipe_count]); 2039 completed = rcutorture_seq_diff(completed, started); 2040 if (completed > RCU_TORTURE_PIPE_LEN) { 2041 /* Should not happen, but... */ 2042 completed = RCU_TORTURE_PIPE_LEN; 2043 } 2044 __this_cpu_inc(rcu_torture_batch[completed]); 2045 preempt_enable(); 2046 if (checkpolling) { 2047 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2048 WARN_ONCE(cur_ops->poll_gp_state(cookie), 2049 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2050 __func__, 2051 rcu_torture_writer_state_getname(), 2052 rcu_torture_writer_state, 2053 cookie, cur_ops->get_gp_state()); 2054 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2055 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 2056 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2057 __func__, 2058 rcu_torture_writer_state_getname(), 2059 rcu_torture_writer_state, 2060 cpumask_pr_args(cpu_online_mask)); 2061 } 2062 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 2063 WARN_ON_ONCE(readstate); 2064 // This next splat is expected behavior if leakpointer, especially 2065 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2066 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 2067 2068 /* If error or close call, record the sequence of reader protections. 
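 * Only the first such event per run is captured (note the xchg() of
 * err_segs_recorded below), so err_segs[] ends up describing the earliest
 * suspicious reader rather than the most recent one.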
*/ 2069 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2070 i = 0; 2071 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 2072 err_segs[i++] = *rtrsp1; 2073 rt_read_nsegs = i; 2074 } 2075 2076 return true; 2077 } 2078 2079 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2080 2081 /* 2082 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2083 * incrementing the corresponding element of the pipeline array. The 2084 * counter in the element should never be greater than 1, otherwise, the 2085 * RCU implementation is broken. 2086 */ 2087 static void rcu_torture_timer(struct timer_list *unused) 2088 { 2089 atomic_long_inc(&n_rcu_torture_timers); 2090 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2091 2092 /* Test call_rcu() invocation from interrupt handler. */ 2093 if (cur_ops->call) { 2094 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2095 2096 if (rhp) 2097 cur_ops->call(rhp, rcu_torture_timer_cb); 2098 } 2099 } 2100 2101 /* 2102 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2103 * incrementing the corresponding element of the pipeline array. The 2104 * counter in the element should never be greater than 1, otherwise, the 2105 * RCU implementation is broken. 2106 */ 2107 static int 2108 rcu_torture_reader(void *arg) 2109 { 2110 unsigned long lastsleep = jiffies; 2111 long myid = (long)arg; 2112 int mynumonline = myid; 2113 DEFINE_TORTURE_RANDOM(rand); 2114 struct timer_list t; 2115 2116 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2117 set_user_nice(current, MAX_NICE); 2118 if (irqreader && cur_ops->irq_capable) 2119 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2120 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2121 do { 2122 if (irqreader && cur_ops->irq_capable) { 2123 if (!timer_pending(&t)) 2124 mod_timer(&t, jiffies + 1); 2125 } 2126 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2127 schedule_timeout_interruptible(HZ); 2128 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2129 torture_hrtimeout_us(500, 1000, &rand); 2130 lastsleep = jiffies + 10; 2131 } 2132 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2133 schedule_timeout_interruptible(HZ / 5); 2134 stutter_wait("rcu_torture_reader"); 2135 } while (!torture_must_stop()); 2136 if (irqreader && cur_ops->irq_capable) { 2137 del_timer_sync(&t); 2138 destroy_timer_on_stack(&t); 2139 } 2140 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2141 torture_kthread_stopping("rcu_torture_reader"); 2142 return 0; 2143 } 2144 2145 /* 2146 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2147 * increase race probabilities and fuzzes the interval between toggling. 
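 *
 * Editorial example: booting with rcutorture.nocbs_nthreads=2 and
 * rcutorture.nocbs_toggle=500 should run two of these kthreads, each
 * picking a random CPU roughly every half second (plus fuzz) and calling
 * rcu_nocb_cpu_offload() or rcu_nocb_cpu_deoffload() on it.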
2148 */ 2149 static int rcu_nocb_toggle(void *arg) 2150 { 2151 int cpu; 2152 int maxcpu = -1; 2153 int oldnice = task_nice(current); 2154 long r; 2155 DEFINE_TORTURE_RANDOM(rand); 2156 ktime_t toggle_delay; 2157 unsigned long toggle_fuzz; 2158 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2159 2160 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2161 while (!rcu_inkernel_boot_has_ended()) 2162 schedule_timeout_interruptible(HZ / 10); 2163 for_each_possible_cpu(cpu) 2164 maxcpu = cpu; 2165 WARN_ON(maxcpu < 0); 2166 if (toggle_interval > ULONG_MAX) 2167 toggle_fuzz = ULONG_MAX >> 3; 2168 else 2169 toggle_fuzz = toggle_interval >> 3; 2170 if (toggle_fuzz <= 0) 2171 toggle_fuzz = NSEC_PER_USEC; 2172 do { 2173 r = torture_random(&rand); 2174 cpu = (r >> 1) % (maxcpu + 1); 2175 if (r & 0x1) { 2176 rcu_nocb_cpu_offload(cpu); 2177 atomic_long_inc(&n_nocb_offload); 2178 } else { 2179 rcu_nocb_cpu_deoffload(cpu); 2180 atomic_long_inc(&n_nocb_deoffload); 2181 } 2182 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2183 set_current_state(TASK_INTERRUPTIBLE); 2184 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2185 if (stutter_wait("rcu_nocb_toggle")) 2186 sched_set_normal(current, oldnice); 2187 } while (!torture_must_stop()); 2188 torture_kthread_stopping("rcu_nocb_toggle"); 2189 return 0; 2190 } 2191 2192 /* 2193 * Print torture statistics. Caller must ensure that there is only 2194 * one call to this function at a given time!!! This is normally 2195 * accomplished by relying on the module system to only have one copy 2196 * of the module loaded, and then by giving the rcu_torture_stats 2197 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2198 * thread is not running). 2199 */ 2200 static void 2201 rcu_torture_stats_print(void) 2202 { 2203 int cpu; 2204 int i; 2205 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2206 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2207 struct rcu_torture *rtcp; 2208 static unsigned long rtcv_snap = ULONG_MAX; 2209 static bool splatted; 2210 struct task_struct *wtp; 2211 2212 for_each_possible_cpu(cpu) { 2213 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2214 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2215 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2216 } 2217 } 2218 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2219 if (pipesummary[i] != 0) 2220 break; 2221 } 2222 2223 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2224 rtcp = rcu_access_pointer(rcu_torture_current); 2225 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2226 rtcp, 2227 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2228 rcu_torture_current_version, 2229 list_empty(&rcu_torture_freelist), 2230 atomic_read(&n_rcu_torture_alloc), 2231 atomic_read(&n_rcu_torture_alloc_fail), 2232 atomic_read(&n_rcu_torture_free)); 2233 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2234 atomic_read(&n_rcu_torture_mberror), 2235 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2236 n_rcu_torture_barrier_error, 2237 n_rcu_torture_boost_ktrerror); 2238 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2239 n_rcu_torture_boost_failure, 2240 n_rcu_torture_boosts, 2241 atomic_long_read(&n_rcu_torture_timers)); 2242 torture_onoff_stats(); 2243 pr_cont("barrier: %ld/%ld:%ld ", 2244 data_race(n_barrier_successes), 2245 data_race(n_barrier_attempts), 2246 data_race(n_rcu_torture_barrier_error)); 2247 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2248 pr_cont("nocb-toggles: %ld:%ld\n", 2249 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2250 2251 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2252 if (atomic_read(&n_rcu_torture_mberror) || 2253 atomic_read(&n_rcu_torture_mbchk_fail) || 2254 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2255 n_rcu_torture_boost_failure || i > 1) { 2256 pr_cont("%s", "!!! "); 2257 atomic_inc(&n_rcu_torture_error); 2258 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2259 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2260 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2261 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2262 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2263 WARN_ON_ONCE(i > 1); // Too-short grace period 2264 } 2265 pr_cont("Reader Pipe: "); 2266 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2267 pr_cont(" %ld", pipesummary[i]); 2268 pr_cont("\n"); 2269 2270 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2271 pr_cont("Reader Batch: "); 2272 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2273 pr_cont(" %ld", batchsummary[i]); 2274 pr_cont("\n"); 2275 2276 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2277 pr_cont("Free-Block Circulation: "); 2278 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2279 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2280 } 2281 pr_cont("\n"); 2282 2283 if (cur_ops->stats) 2284 cur_ops->stats(); 2285 if (rtcv_snap == rcu_torture_current_version && 2286 rcu_access_pointer(rcu_torture_current) && 2287 !rcu_stall_is_suppressed()) { 2288 int __maybe_unused flags = 0; 2289 unsigned long __maybe_unused gp_seq = 0; 2290 2291 if (cur_ops->get_gp_data) 2292 cur_ops->get_gp_data(&flags, &gp_seq); 2293 wtp = READ_ONCE(writer_task); 2294 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2295 rcu_torture_writer_state_getname(), 2296 rcu_torture_writer_state, gp_seq, flags, 2297 wtp == NULL ? ~0U : wtp->__state, 2298 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2299 if (!splatted && wtp) { 2300 sched_show_task(wtp); 2301 splatted = true; 2302 } 2303 if (cur_ops->gp_kthread_dbg) 2304 cur_ops->gp_kthread_dbg(); 2305 rcu_ftrace_dump(DUMP_ALL); 2306 } 2307 rtcv_snap = rcu_torture_current_version; 2308 } 2309 2310 /* 2311 * Periodically prints torture statistics, if periodic statistics printing 2312 * was specified via the stat_interval module parameter. 
2313 */ 2314 static int 2315 rcu_torture_stats(void *arg) 2316 { 2317 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2318 do { 2319 schedule_timeout_interruptible(stat_interval * HZ); 2320 rcu_torture_stats_print(); 2321 torture_shutdown_absorb("rcu_torture_stats"); 2322 } while (!torture_must_stop()); 2323 torture_kthread_stopping("rcu_torture_stats"); 2324 return 0; 2325 } 2326 2327 /* Test mem_dump_obj() and friends. */ 2328 static void rcu_torture_mem_dump_obj(void) 2329 { 2330 struct rcu_head *rhp; 2331 struct kmem_cache *kcp; 2332 static int z; 2333 2334 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2335 if (WARN_ON_ONCE(!kcp)) 2336 return; 2337 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2338 if (WARN_ON_ONCE(!rhp)) { 2339 kmem_cache_destroy(kcp); 2340 return; 2341 } 2342 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2343 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2344 mem_dump_obj(ZERO_SIZE_PTR); 2345 pr_alert("mem_dump_obj(NULL):"); 2346 mem_dump_obj(NULL); 2347 pr_alert("mem_dump_obj(%px):", &rhp); 2348 mem_dump_obj(&rhp); 2349 pr_alert("mem_dump_obj(%px):", rhp); 2350 mem_dump_obj(rhp); 2351 pr_alert("mem_dump_obj(%px):", &rhp->func); 2352 mem_dump_obj(&rhp->func); 2353 pr_alert("mem_dump_obj(%px):", &z); 2354 mem_dump_obj(&z); 2355 kmem_cache_free(kcp, rhp); 2356 kmem_cache_destroy(kcp); 2357 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2358 if (WARN_ON_ONCE(!rhp)) 2359 return; 2360 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2361 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2362 mem_dump_obj(rhp); 2363 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2364 mem_dump_obj(&rhp->func); 2365 kfree(rhp); 2366 rhp = vmalloc(4096); 2367 if (WARN_ON_ONCE(!rhp)) 2368 return; 2369 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2370 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2371 mem_dump_obj(rhp); 2372 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2373 mem_dump_obj(&rhp->func); 2374 vfree(rhp); 2375 } 2376 2377 static void 2378 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2379 { 2380 pr_alert("%s" TORTURE_FLAG 2381 "--- %s: nreaders=%d nfakewriters=%d " 2382 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2383 "shuffle_interval=%d stutter=%d irqreader=%d " 2384 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2385 "test_boost=%d/%d test_boost_interval=%d " 2386 "test_boost_duration=%d shutdown_secs=%d " 2387 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2388 "stall_cpu_block=%d stall_cpu_repeat=%d " 2389 "n_barrier_cbs=%d " 2390 "onoff_interval=%d onoff_holdoff=%d " 2391 "read_exit_delay=%d read_exit_burst=%d " 2392 "nocbs_nthreads=%d nocbs_toggle=%d " 2393 "test_nmis=%d\n", 2394 torture_type, tag, nrealreaders, nfakewriters, 2395 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2396 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2397 test_boost, cur_ops->can_boost, 2398 test_boost_interval, test_boost_duration, shutdown_secs, 2399 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2400 stall_cpu_block, stall_cpu_repeat, 2401 n_barrier_cbs, 2402 onoff_interval, onoff_holdoff, 2403 read_exit_delay, read_exit_burst, 2404 nocbs_nthreads, nocbs_toggle, 2405 test_nmis); 2406 } 2407 2408 static int rcutorture_booster_cleanup(unsigned int cpu) 2409 { 2410 struct task_struct *t; 
2411 2412 if (boost_tasks[cpu] == NULL) 2413 return 0; 2414 mutex_lock(&boost_mutex); 2415 t = boost_tasks[cpu]; 2416 boost_tasks[cpu] = NULL; 2417 rcu_torture_enable_rt_throttle(); 2418 mutex_unlock(&boost_mutex); 2419 2420 /* This must be outside of the mutex, otherwise deadlock! */ 2421 torture_stop_kthread(rcu_torture_boost, t); 2422 return 0; 2423 } 2424 2425 static int rcutorture_booster_init(unsigned int cpu) 2426 { 2427 int retval; 2428 2429 if (boost_tasks[cpu] != NULL) 2430 return 0; /* Already created, nothing more to do. */ 2431 2432 // Testing RCU priority boosting requires rcutorture do 2433 // some serious abuse. Counter this by running ksoftirqd 2434 // at higher priority. 2435 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2436 struct sched_param sp; 2437 struct task_struct *t; 2438 2439 t = per_cpu(ksoftirqd, cpu); 2440 WARN_ON_ONCE(!t); 2441 sp.sched_priority = 2; 2442 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2443 } 2444 2445 /* Don't allow time recalculation while creating a new task. */ 2446 mutex_lock(&boost_mutex); 2447 rcu_torture_disable_rt_throttle(); 2448 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2449 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2450 cpu, "rcu_torture_boost_%u"); 2451 if (IS_ERR(boost_tasks[cpu])) { 2452 retval = PTR_ERR(boost_tasks[cpu]); 2453 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2454 n_rcu_torture_boost_ktrerror++; 2455 boost_tasks[cpu] = NULL; 2456 mutex_unlock(&boost_mutex); 2457 return retval; 2458 } 2459 mutex_unlock(&boost_mutex); 2460 return 0; 2461 } 2462 2463 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 2464 { 2465 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 2466 return NOTIFY_OK; 2467 } 2468 2469 static struct notifier_block rcu_torture_stall_block = { 2470 .notifier_call = rcu_torture_stall_nf, 2471 }; 2472 2473 /* 2474 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2475 * induces a CPU stall for the time specified by stall_cpu. If a new 2476 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 2477 */ 2478 static void rcu_torture_stall_one(int rep, int irqsoff) 2479 { 2480 int idx; 2481 unsigned long stop_at; 2482 2483 if (stall_cpu_holdoff > 0) { 2484 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2485 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2486 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2487 } 2488 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2489 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2490 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2491 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2492 if (kthread_should_stop()) 2493 break; 2494 schedule_timeout_uninterruptible(HZ); 2495 } 2496 } 2497 if (!kthread_should_stop() && stall_cpu > 0) { 2498 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2499 stop_at = ktime_get_seconds() + stall_cpu; 2500 /* RCU CPU stall is expected behavior in following code. 
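 * Any resulting RCU CPU stall warnings on the console are therefore
 * intentional and should not be reported as bugs when stall_cpu testing
 * is in use.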
*/ 2501 idx = cur_ops->readlock(); 2502 if (irqsoff) 2503 local_irq_disable(); 2504 else if (!stall_cpu_block) 2505 preempt_disable(); 2506 pr_alert("%s start stall episode %d on CPU %d.\n", 2507 __func__, rep + 1, raw_smp_processor_id()); 2508 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && 2509 !kthread_should_stop()) 2510 if (stall_cpu_block) { 2511 #ifdef CONFIG_PREEMPTION 2512 preempt_schedule(); 2513 #else 2514 schedule_timeout_uninterruptible(HZ); 2515 #endif 2516 } else if (stall_no_softlockup) { 2517 touch_softlockup_watchdog(); 2518 } 2519 if (irqsoff) 2520 local_irq_enable(); 2521 else if (!stall_cpu_block) 2522 preempt_enable(); 2523 cur_ops->readunlock(idx); 2524 } 2525 } 2526 2527 /* 2528 * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many 2529 * additional times as specified by the stall_cpu_repeat module parameter. 2530 * Note that stall_cpu_irqsoff is ignored on the second and subsequent 2531 * stall. 2532 */ 2533 static int rcu_torture_stall(void *args) 2534 { 2535 int i; 2536 int repeat = stall_cpu_repeat; 2537 int ret; 2538 2539 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2540 if (repeat < 0) { 2541 repeat = 0; 2542 WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)); 2543 } 2544 if (rcu_cpu_stall_notifiers) { 2545 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 2546 if (ret) 2547 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 2548 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 2549 } 2550 for (i = 0; i <= repeat; i++) { 2551 if (kthread_should_stop()) 2552 break; 2553 rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0); 2554 } 2555 pr_alert("%s end.\n", __func__); 2556 if (rcu_cpu_stall_notifiers && !ret) { 2557 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 2558 if (ret) 2559 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 2560 } 2561 torture_shutdown_absorb("rcu_torture_stall"); 2562 while (!kthread_should_stop()) 2563 schedule_timeout_interruptible(10 * HZ); 2564 return 0; 2565 } 2566 2567 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2568 static int __init rcu_torture_stall_init(void) 2569 { 2570 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2571 return 0; 2572 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2573 } 2574 2575 /* State structure for forward-progress self-propagating RCU callback. */ 2576 struct fwd_cb_state { 2577 struct rcu_head rh; 2578 int stop; 2579 }; 2580 2581 /* 2582 * Forward-progress self-propagating RCU callback function. Because 2583 * callbacks run from softirq, this function is an implicit RCU read-side 2584 * critical section. 2585 */ 2586 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2587 { 2588 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2589 2590 if (READ_ONCE(fcsp->stop)) { 2591 WRITE_ONCE(fcsp->stop, 2); 2592 return; 2593 } 2594 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2595 } 2596 2597 /* State for continuous-flood RCU callbacks. */ 2598 struct rcu_fwd_cb { 2599 struct rcu_head rh; 2600 struct rcu_fwd_cb *rfc_next; 2601 struct rcu_fwd *rfc_rfp; 2602 int rfc_gps; 2603 }; 2604 2605 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2606 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2607 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2608 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
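 * Editorial note: with the values above, N_LAUNDERS_HIST below works out
 * to 2 * (8 * HZ) / (HZ / 10) = 160 buckets for the usual HZ choices,
 * each bucket covering a tenth of a second across twice the
 * MAX_FWD_CB_JIFFIES test duration.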
*/ 2609 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2610 2611 struct rcu_launder_hist { 2612 long n_launders; 2613 unsigned long launder_gp_seq; 2614 }; 2615 2616 struct rcu_fwd { 2617 spinlock_t rcu_fwd_lock; 2618 struct rcu_fwd_cb *rcu_fwd_cb_head; 2619 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2620 long n_launders_cb; 2621 unsigned long rcu_fwd_startat; 2622 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2623 unsigned long rcu_launder_gp_seq_start; 2624 int rcu_fwd_id; 2625 }; 2626 2627 static DEFINE_MUTEX(rcu_fwd_mutex); 2628 static struct rcu_fwd *rcu_fwds; 2629 static unsigned long rcu_fwd_seq; 2630 static atomic_long_t rcu_fwd_max_cbs; 2631 static bool rcu_fwd_emergency_stop; 2632 2633 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2634 { 2635 unsigned long gps; 2636 unsigned long gps_old; 2637 int i; 2638 int j; 2639 2640 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2641 if (rfp->n_launders_hist[i].n_launders > 0) 2642 break; 2643 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2644 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2645 gps_old = rfp->rcu_launder_gp_seq_start; 2646 for (j = 0; j <= i; j++) { 2647 gps = rfp->n_launders_hist[j].launder_gp_seq; 2648 pr_cont(" %ds/%d: %ld:%ld", 2649 j + 1, FWD_CBS_HIST_DIV, 2650 rfp->n_launders_hist[j].n_launders, 2651 rcutorture_seq_diff(gps, gps_old)); 2652 gps_old = gps; 2653 } 2654 pr_cont("\n"); 2655 } 2656 2657 /* Callback function for continuous-flood RCU callbacks. */ 2658 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2659 { 2660 unsigned long flags; 2661 int i; 2662 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2663 struct rcu_fwd_cb **rfcpp; 2664 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2665 2666 rfcp->rfc_next = NULL; 2667 rfcp->rfc_gps++; 2668 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2669 rfcpp = rfp->rcu_fwd_cb_tail; 2670 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2671 smp_store_release(rfcpp, rfcp); 2672 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2673 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2674 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2675 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2676 rfp->n_launders_hist[i].n_launders++; 2677 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2678 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2679 } 2680 2681 // Give the scheduler a chance, even on nohz_full CPUs. 2682 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2683 { 2684 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2685 // Real call_rcu() floods hit userspace, so emulate that. 2686 if (need_resched() || (iter & 0xfff)) 2687 schedule(); 2688 return; 2689 } 2690 // No userspace emulation: CB invocation throttles call_rcu() 2691 cond_resched(); 2692 } 2693 2694 /* 2695 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2696 * test is over or because we hit an OOM event. 
2697 */ 2698 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2699 { 2700 unsigned long flags; 2701 unsigned long freed = 0; 2702 struct rcu_fwd_cb *rfcp; 2703 2704 for (;;) { 2705 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2706 rfcp = rfp->rcu_fwd_cb_head; 2707 if (!rfcp) { 2708 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2709 break; 2710 } 2711 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2712 if (!rfp->rcu_fwd_cb_head) 2713 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2714 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2715 kfree(rfcp); 2716 freed++; 2717 rcu_torture_fwd_prog_cond_resched(freed); 2718 if (tick_nohz_full_enabled()) { 2719 local_irq_save(flags); 2720 rcu_momentary_eqs(); 2721 local_irq_restore(flags); 2722 } 2723 } 2724 return freed; 2725 } 2726 2727 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2728 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2729 int *tested, int *tested_tries) 2730 { 2731 unsigned long cver; 2732 unsigned long dur; 2733 struct fwd_cb_state fcs; 2734 unsigned long gps; 2735 int idx; 2736 int sd; 2737 int sd4; 2738 bool selfpropcb = false; 2739 unsigned long stopat; 2740 static DEFINE_TORTURE_RANDOM(trs); 2741 2742 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2743 if (!cur_ops->sync) 2744 return; // Cannot do need_resched() forward progress testing without ->sync. 2745 if (cur_ops->call && cur_ops->cb_barrier) { 2746 init_rcu_head_on_stack(&fcs.rh); 2747 selfpropcb = true; 2748 } 2749 2750 /* Tight loop containing cond_resched(). */ 2751 atomic_inc(&rcu_fwd_cb_nodelay); 2752 cur_ops->sync(); /* Later readers see above write. */ 2753 if (selfpropcb) { 2754 WRITE_ONCE(fcs.stop, 0); 2755 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2756 } 2757 cver = READ_ONCE(rcu_torture_current_version); 2758 gps = cur_ops->get_gp_seq(); 2759 sd = cur_ops->stall_dur() + 1; 2760 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2761 dur = sd4 + torture_random(&trs) % (sd - sd4); 2762 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2763 stopat = rfp->rcu_fwd_startat + dur; 2764 while (time_before(jiffies, stopat) && 2765 !shutdown_time_arrived() && 2766 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2767 idx = cur_ops->readlock(); 2768 udelay(10); 2769 cur_ops->readunlock(idx); 2770 if (!fwd_progress_need_resched || need_resched()) 2771 cond_resched(); 2772 } 2773 (*tested_tries)++; 2774 if (!time_before(jiffies, stopat) && 2775 !shutdown_time_arrived() && 2776 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2777 (*tested)++; 2778 cver = READ_ONCE(rcu_torture_current_version) - cver; 2779 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2780 WARN_ON(!cver && gps < 2); 2781 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2782 rfp->rcu_fwd_id, dur, cver, gps); 2783 } 2784 if (selfpropcb) { 2785 WRITE_ONCE(fcs.stop, 1); 2786 cur_ops->sync(); /* Wait for running CB to complete. */ 2787 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2788 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2789 } 2790 2791 if (selfpropcb) { 2792 WARN_ON(READ_ONCE(fcs.stop) != 2); 2793 destroy_rcu_head_on_stack(&fcs.rh); 2794 } 2795 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2796 atomic_dec(&rcu_fwd_cb_nodelay); 2797 } 2798 2799 /* Carry out call_rcu() forward-progress testing. 
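 * Editorial note: rcu_torture_fwd_cb_cr() puts each invoked callback back
 * on the list with an incremented ->rfc_gps count, and the loop below
 * re-posts it.  A re-posted callback that has seen at least
 * MIN_FWD_CB_LAUNDERS invocations counts toward n_max_gps, and the flood
 * ends early once MIN_FWD_CBS_LAUNDERED such callbacks have been observed
 * (or when MAX_FWD_CB_JIFFIES expires).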
*/ 2800 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2801 { 2802 unsigned long cver; 2803 unsigned long flags; 2804 unsigned long gps; 2805 int i; 2806 long n_launders; 2807 long n_launders_cb_snap; 2808 long n_launders_sa; 2809 long n_max_cbs; 2810 long n_max_gps; 2811 struct rcu_fwd_cb *rfcp; 2812 struct rcu_fwd_cb *rfcpn; 2813 unsigned long stopat; 2814 unsigned long stoppedat; 2815 2816 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2817 if (READ_ONCE(rcu_fwd_emergency_stop)) 2818 return; /* Get out of the way quickly, no GP wait! */ 2819 if (!cur_ops->call) 2820 return; /* Can't do call_rcu() fwd prog without ->call. */ 2821 2822 /* Loop continuously posting RCU callbacks. */ 2823 atomic_inc(&rcu_fwd_cb_nodelay); 2824 cur_ops->sync(); /* Later readers see above write. */ 2825 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2826 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2827 n_launders = 0; 2828 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2829 n_launders_sa = 0; 2830 n_max_cbs = 0; 2831 n_max_gps = 0; 2832 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2833 rfp->n_launders_hist[i].n_launders = 0; 2834 cver = READ_ONCE(rcu_torture_current_version); 2835 gps = cur_ops->get_gp_seq(); 2836 rfp->rcu_launder_gp_seq_start = gps; 2837 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2838 while (time_before(jiffies, stopat) && 2839 !shutdown_time_arrived() && 2840 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2841 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2842 rfcpn = NULL; 2843 if (rfcp) 2844 rfcpn = READ_ONCE(rfcp->rfc_next); 2845 if (rfcpn) { 2846 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2847 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2848 break; 2849 rfp->rcu_fwd_cb_head = rfcpn; 2850 n_launders++; 2851 n_launders_sa++; 2852 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2853 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2854 if (WARN_ON_ONCE(!rfcp)) { 2855 schedule_timeout_interruptible(1); 2856 continue; 2857 } 2858 n_max_cbs++; 2859 n_launders_sa = 0; 2860 rfcp->rfc_gps = 0; 2861 rfcp->rfc_rfp = rfp; 2862 } else { 2863 rfcp = NULL; 2864 } 2865 if (rfcp) 2866 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2867 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2868 if (tick_nohz_full_enabled()) { 2869 local_irq_save(flags); 2870 rcu_momentary_eqs(); 2871 local_irq_restore(flags); 2872 } 2873 } 2874 stoppedat = jiffies; 2875 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2876 cver = READ_ONCE(rcu_torture_current_version) - cver; 2877 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2878 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2879 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. 
*/ 2880 (void)rcu_torture_fwd_prog_cbfree(rfp); 2881 2882 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2883 !shutdown_time_arrived()) { 2884 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) 2885 cur_ops->gp_kthread_dbg(); 2886 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n", 2887 __func__, 2888 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2889 n_launders + n_max_cbs - n_launders_cb_snap, 2890 n_launders, n_launders_sa, 2891 n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); 2892 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2893 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 2894 rcu_torture_fwd_cb_hist(rfp); 2895 mutex_unlock(&rcu_fwd_mutex); 2896 } 2897 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2898 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2899 atomic_dec(&rcu_fwd_cb_nodelay); 2900 } 2901 2902 2903 /* 2904 * OOM notifier, but this only prints diagnostic information for the 2905 * current forward-progress test. 2906 */ 2907 static int rcutorture_oom_notify(struct notifier_block *self, 2908 unsigned long notused, void *nfreed) 2909 { 2910 int i; 2911 long ncbs; 2912 struct rcu_fwd *rfp; 2913 2914 mutex_lock(&rcu_fwd_mutex); 2915 rfp = rcu_fwds; 2916 if (!rfp) { 2917 mutex_unlock(&rcu_fwd_mutex); 2918 return NOTIFY_OK; 2919 } 2920 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2921 __func__); 2922 for (i = 0; i < fwd_progress; i++) { 2923 rcu_torture_fwd_cb_hist(&rfp[i]); 2924 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2925 } 2926 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2927 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2928 ncbs = 0; 2929 for (i = 0; i < fwd_progress; i++) 2930 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2931 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2932 cur_ops->cb_barrier(); 2933 ncbs = 0; 2934 for (i = 0; i < fwd_progress; i++) 2935 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2936 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2937 cur_ops->cb_barrier(); 2938 ncbs = 0; 2939 for (i = 0; i < fwd_progress; i++) 2940 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2941 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2942 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2943 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2944 pr_info("%s returning after OOM processing.\n", __func__); 2945 mutex_unlock(&rcu_fwd_mutex); 2946 return NOTIFY_OK; 2947 } 2948 2949 static struct notifier_block rcutorture_oom_nb = { 2950 .notifier_call = rcutorture_oom_notify 2951 }; 2952 2953 /* Carry out grace-period forward-progress testing. 
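 * Editorial note: when fwd_progress creates several of these kthreads,
 * the one with ->rcu_fwd_id == 0 paces the rest: it clears
 * rcu_fwd_emergency_stop, advances rcu_fwd_seq, and reports the per-pass
 * n_max_cbs total, while the other kthreads simply wait for rcu_fwd_seq
 * to change before starting their own pass.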
*/ 2954 static int rcu_torture_fwd_prog(void *args) 2955 { 2956 bool firsttime = true; 2957 long max_cbs; 2958 int oldnice = task_nice(current); 2959 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2960 struct rcu_fwd *rfp = args; 2961 int tested = 0; 2962 int tested_tries = 0; 2963 2964 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2965 rcu_bind_current_to_nocb(); 2966 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2967 set_user_nice(current, MAX_NICE); 2968 do { 2969 if (!rfp->rcu_fwd_id) { 2970 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2971 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2972 if (!firsttime) { 2973 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2974 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2975 } 2976 firsttime = false; 2977 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2978 } else { 2979 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2980 schedule_timeout_interruptible(HZ / 20); 2981 oldseq = READ_ONCE(rcu_fwd_seq); 2982 } 2983 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2984 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2985 rcu_torture_fwd_prog_cr(rfp); 2986 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2987 (!IS_ENABLED(CONFIG_TINY_RCU) || 2988 (rcu_inkernel_boot_has_ended() && 2989 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2990 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2991 2992 /* Avoid slow periods, better to test when busy. */ 2993 if (stutter_wait("rcu_torture_fwd_prog")) 2994 sched_set_normal(current, oldnice); 2995 } while (!torture_must_stop()); 2996 /* Short runs might not contain a valid forward-progress attempt. */ 2997 if (!rfp->rcu_fwd_id) { 2998 WARN_ON(!tested && tested_tries >= 5); 2999 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 3000 } 3001 torture_kthread_stopping("rcu_torture_fwd_prog"); 3002 return 0; 3003 } 3004 3005 /* If forward-progress checking is requested and feasible, spawn the thread. */ 3006 static int __init rcu_torture_fwd_prog_init(void) 3007 { 3008 int i; 3009 int ret = 0; 3010 struct rcu_fwd *rfp; 3011 3012 if (!fwd_progress) 3013 return 0; /* Not requested, so don't do it. */ 3014 if (fwd_progress >= nr_cpu_ids) { 3015 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 3016 fwd_progress = nr_cpu_ids; 3017 } else if (fwd_progress < 0) { 3018 fwd_progress = nr_cpu_ids; 3019 } 3020 if ((!cur_ops->sync && !cur_ops->call) || 3021 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 3022 cur_ops == &rcu_busted_ops) { 3023 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 3024 fwd_progress = 0; 3025 return 0; 3026 } 3027 if (stall_cpu > 0) { 3028 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 3029 fwd_progress = 0; 3030 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 3031 return -EINVAL; /* In module, can fail back to user. */ 3032 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 3033 return 0; 3034 } 3035 if (fwd_progress_holdoff <= 0) 3036 fwd_progress_holdoff = 1; 3037 if (fwd_progress_div <= 0) 3038 fwd_progress_div = 4; 3039 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 3040 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 3041 if (!rfp || !fwd_prog_tasks) { 3042 kfree(rfp); 3043 kfree(fwd_prog_tasks); 3044 fwd_prog_tasks = NULL; 3045 fwd_progress = 0; 3046 return -ENOMEM; 3047 } 3048 for (i = 0; i < fwd_progress; i++) { 3049 spin_lock_init(&rfp[i].rcu_fwd_lock); 3050 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3051 rfp[i].rcu_fwd_id = i; 3052 } 3053 mutex_lock(&rcu_fwd_mutex); 3054 rcu_fwds = rfp; 3055 mutex_unlock(&rcu_fwd_mutex); 3056 register_oom_notifier(&rcutorture_oom_nb); 3057 for (i = 0; i < fwd_progress; i++) { 3058 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3059 if (ret) { 3060 fwd_progress = i; 3061 return ret; 3062 } 3063 } 3064 return 0; 3065 } 3066 3067 static void rcu_torture_fwd_prog_cleanup(void) 3068 { 3069 int i; 3070 struct rcu_fwd *rfp; 3071 3072 if (!rcu_fwds || !fwd_prog_tasks) 3073 return; 3074 for (i = 0; i < fwd_progress; i++) 3075 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3076 unregister_oom_notifier(&rcutorture_oom_nb); 3077 mutex_lock(&rcu_fwd_mutex); 3078 rfp = rcu_fwds; 3079 rcu_fwds = NULL; 3080 mutex_unlock(&rcu_fwd_mutex); 3081 kfree(rfp); 3082 kfree(fwd_prog_tasks); 3083 fwd_prog_tasks = NULL; 3084 } 3085 3086 /* Callback function for RCU barrier testing. */ 3087 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3088 { 3089 atomic_inc(&barrier_cbs_invoked); 3090 } 3091 3092 /* IPI handler to get callback posted on desired CPU, if online. */ 3093 static int rcu_torture_barrier1cb(void *rcu_void) 3094 { 3095 struct rcu_head *rhp = rcu_void; 3096 3097 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3098 return 0; 3099 } 3100 3101 /* kthread function to register callbacks used to test RCU barriers. */ 3102 static int rcu_torture_barrier_cbs(void *arg) 3103 { 3104 long myid = (long)arg; 3105 bool lastphase = false; 3106 bool newphase; 3107 struct rcu_head rcu; 3108 3109 init_rcu_head_on_stack(&rcu); 3110 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3111 set_user_nice(current, MAX_NICE); 3112 do { 3113 wait_event(barrier_cbs_wq[myid], 3114 (newphase = 3115 smp_load_acquire(&barrier_phase)) != lastphase || 3116 torture_must_stop()); 3117 lastphase = newphase; 3118 if (torture_must_stop()) 3119 break; 3120 /* 3121 * The above smp_load_acquire() ensures barrier_phase load 3122 * is ordered before the following ->call(). 3123 */ 3124 if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1)) 3125 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3126 3127 if (atomic_dec_and_test(&barrier_cbs_count)) 3128 wake_up(&barrier_wq); 3129 } while (!torture_must_stop()); 3130 if (cur_ops->cb_barrier != NULL) 3131 cur_ops->cb_barrier(); 3132 destroy_rcu_head_on_stack(&rcu); 3133 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3134 return 0; 3135 } 3136 3137 /* kthread function to drive and coordinate RCU barrier testing. */ 3138 static int rcu_torture_barrier(void *arg) 3139 { 3140 int i; 3141 3142 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3143 do { 3144 atomic_set(&barrier_cbs_invoked, 0); 3145 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3146 /* Ensure barrier_phase ordered after prior assignments. 
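 * This smp_store_release() pairs with the smp_load_acquire() of
 * barrier_phase in rcu_torture_barrier_cbs() above.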
*/ 3147 smp_store_release(&barrier_phase, !barrier_phase); 3148 for (i = 0; i < n_barrier_cbs; i++) 3149 wake_up(&barrier_cbs_wq[i]); 3150 wait_event(barrier_wq, 3151 atomic_read(&barrier_cbs_count) == 0 || 3152 torture_must_stop()); 3153 if (torture_must_stop()) 3154 break; 3155 n_barrier_attempts++; 3156 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3157 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3158 n_rcu_torture_barrier_error++; 3159 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3160 atomic_read(&barrier_cbs_invoked), 3161 n_barrier_cbs); 3162 WARN_ON(1); 3163 // Wait manually for the remaining callbacks 3164 i = 0; 3165 do { 3166 if (WARN_ON(i++ > HZ)) 3167 i = INT_MIN; 3168 schedule_timeout_interruptible(1); 3169 cur_ops->cb_barrier(); 3170 } while (atomic_read(&barrier_cbs_invoked) != 3171 n_barrier_cbs && 3172 !torture_must_stop()); 3173 smp_mb(); // Can't trust ordering if broken. 3174 if (!torture_must_stop()) 3175 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3176 atomic_read(&barrier_cbs_invoked)); 3177 } else { 3178 n_barrier_successes++; 3179 } 3180 schedule_timeout_interruptible(HZ / 10); 3181 } while (!torture_must_stop()); 3182 torture_kthread_stopping("rcu_torture_barrier"); 3183 return 0; 3184 } 3185 3186 /* Initialize RCU barrier testing. */ 3187 static int rcu_torture_barrier_init(void) 3188 { 3189 int i; 3190 int ret; 3191 3192 if (n_barrier_cbs <= 0) 3193 return 0; 3194 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3195 pr_alert("%s" TORTURE_FLAG 3196 " Call or barrier ops missing for %s,\n", 3197 torture_type, cur_ops->name); 3198 pr_alert("%s" TORTURE_FLAG 3199 " RCU barrier testing omitted from run.\n", 3200 torture_type); 3201 return 0; 3202 } 3203 atomic_set(&barrier_cbs_count, 0); 3204 atomic_set(&barrier_cbs_invoked, 0); 3205 barrier_cbs_tasks = 3206 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3207 GFP_KERNEL); 3208 barrier_cbs_wq = 3209 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3210 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3211 return -ENOMEM; 3212 for (i = 0; i < n_barrier_cbs; i++) { 3213 init_waitqueue_head(&barrier_cbs_wq[i]); 3214 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3215 (void *)(long)i, 3216 barrier_cbs_tasks[i]); 3217 if (ret) 3218 return ret; 3219 } 3220 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3221 } 3222 3223 /* Clean up after RCU barrier testing. */ 3224 static void rcu_torture_barrier_cleanup(void) 3225 { 3226 int i; 3227 3228 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3229 if (barrier_cbs_tasks != NULL) { 3230 for (i = 0; i < n_barrier_cbs; i++) 3231 torture_stop_kthread(rcu_torture_barrier_cbs, 3232 barrier_cbs_tasks[i]); 3233 kfree(barrier_cbs_tasks); 3234 barrier_cbs_tasks = NULL; 3235 } 3236 if (barrier_cbs_wq != NULL) { 3237 kfree(barrier_cbs_wq); 3238 barrier_cbs_wq = NULL; 3239 } 3240 } 3241 3242 static bool rcu_torture_can_boost(void) 3243 { 3244 static int boost_warn_once; 3245 int prio; 3246 3247 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3248 return false; 3249 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3250 return false; 3251 3252 prio = rcu_get_gp_kthreads_prio(); 3253 if (!prio) 3254 return false; 3255 3256 if (prio < 2) { 3257 if (boost_warn_once == 1) 3258 return false; 3259 3260 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3261 boost_warn_once = 1; 3262 return false; 3263 } 3264 3265 return true; 3266 } 3267 3268 static bool read_exit_child_stop; 3269 static bool read_exit_child_stopped; 3270 static wait_queue_head_t read_exit_wq; 3271 3272 // Child kthread which just does an rcutorture reader and exits. 3273 static int rcu_torture_read_exit_child(void *trsp_in) 3274 { 3275 struct torture_random_state *trsp = trsp_in; 3276 3277 set_user_nice(current, MAX_NICE); 3278 // Minimize time between reading and exiting. 3279 while (!kthread_should_stop()) 3280 schedule_timeout_uninterruptible(HZ / 20); 3281 (void)rcu_torture_one_read(trsp, -1); 3282 return 0; 3283 } 3284 3285 // Parent kthread which creates and destroys read-exit child kthreads. 3286 static int rcu_torture_read_exit(void *unused) 3287 { 3288 bool errexit = false; 3289 int i; 3290 struct task_struct *tsp; 3291 DEFINE_TORTURE_RANDOM(trs); 3292 3293 // Allocate and initialize. 3294 set_user_nice(current, MAX_NICE); 3295 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3296 3297 // Each pass through this loop does one read-exit episode. 3298 do { 3299 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3300 for (i = 0; i < read_exit_burst; i++) { 3301 if (READ_ONCE(read_exit_child_stop)) 3302 break; 3303 stutter_wait("rcu_torture_read_exit"); 3304 // Spawn child. 3305 tsp = kthread_run(rcu_torture_read_exit_child, 3306 &trs, "%s", "rcu_torture_read_exit_child"); 3307 if (IS_ERR(tsp)) { 3308 TOROUT_ERRSTRING("out of memory"); 3309 errexit = true; 3310 break; 3311 } 3312 cond_resched(); 3313 kthread_stop(tsp); 3314 n_read_exits++; 3315 } 3316 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3317 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3318 i = 0; 3319 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3320 schedule_timeout_uninterruptible(HZ); 3321 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3322 3323 // Clean up and exit. 3324 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3325 smp_mb(); // Store before wakeup. 3326 wake_up(&read_exit_wq); 3327 while (!torture_must_stop()) 3328 schedule_timeout_uninterruptible(HZ / 20); 3329 torture_kthread_stopping("rcu_torture_read_exit"); 3330 return 0; 3331 } 3332 3333 static int rcu_torture_read_exit_init(void) 3334 { 3335 if (read_exit_burst <= 0) 3336 return 0; 3337 init_waitqueue_head(&read_exit_wq); 3338 read_exit_child_stop = false; 3339 read_exit_child_stopped = false; 3340 return torture_create_kthread(rcu_torture_read_exit, NULL, 3341 read_exit_task); 3342 } 3343 3344 static void rcu_torture_read_exit_cleanup(void) 3345 { 3346 if (!read_exit_task) 3347 return; 3348 WRITE_ONCE(read_exit_child_stop, true); 3349 smp_mb(); // Above write before wait. 
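// The wait_event() below pairs with the smp_store_release() of read_exit_child_stopped and the wake_up() in rcu_torture_read_exit(), so all child reaping has completed before the parent kthread is stopped.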
3350 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 3351 torture_stop_kthread(rcu_torture_read_exit, read_exit_task); 3352 } 3353 3354 static void rcutorture_test_nmis(int n) 3355 { 3356 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3357 int cpu; 3358 int dumpcpu; 3359 int i; 3360 3361 for (i = 0; i < n; i++) { 3362 preempt_disable(); 3363 cpu = smp_processor_id(); 3364 dumpcpu = cpu + 1; 3365 if (dumpcpu >= nr_cpu_ids) 3366 dumpcpu = 0; 3367 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu); 3368 dump_cpu_task(dumpcpu); 3369 preempt_enable(); 3370 schedule_timeout_uninterruptible(15 * HZ); 3371 } 3372 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3373 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis); 3374 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3375 } 3376 3377 static enum cpuhp_state rcutor_hp; 3378 3379 static void 3380 rcu_torture_cleanup(void) 3381 { 3382 int firsttime; 3383 int flags = 0; 3384 unsigned long gp_seq = 0; 3385 int i; 3386 3387 if (torture_cleanup_begin()) { 3388 if (cur_ops->cb_barrier != NULL) { 3389 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3390 cur_ops->cb_barrier(); 3391 } 3392 if (cur_ops->gp_slow_unregister) 3393 cur_ops->gp_slow_unregister(NULL); 3394 return; 3395 } 3396 if (!cur_ops) { 3397 torture_cleanup_end(); 3398 return; 3399 } 3400 3401 rcutorture_test_nmis(test_nmis); 3402 3403 if (cur_ops->gp_kthread_dbg) 3404 cur_ops->gp_kthread_dbg(); 3405 rcu_torture_read_exit_cleanup(); 3406 rcu_torture_barrier_cleanup(); 3407 rcu_torture_fwd_prog_cleanup(); 3408 torture_stop_kthread(rcu_torture_stall, stall_task); 3409 torture_stop_kthread(rcu_torture_writer, writer_task); 3410 3411 if (nocb_tasks) { 3412 for (i = 0; i < nrealnocbers; i++) 3413 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 3414 kfree(nocb_tasks); 3415 nocb_tasks = NULL; 3416 } 3417 3418 if (reader_tasks) { 3419 for (i = 0; i < nrealreaders; i++) 3420 torture_stop_kthread(rcu_torture_reader, 3421 reader_tasks[i]); 3422 kfree(reader_tasks); 3423 reader_tasks = NULL; 3424 } 3425 kfree(rcu_torture_reader_mbchk); 3426 rcu_torture_reader_mbchk = NULL; 3427 3428 if (fakewriter_tasks) { 3429 for (i = 0; i < nfakewriters; i++) 3430 torture_stop_kthread(rcu_torture_fakewriter, 3431 fakewriter_tasks[i]); 3432 kfree(fakewriter_tasks); 3433 fakewriter_tasks = NULL; 3434 } 3435 3436 if (cur_ops->get_gp_data) 3437 cur_ops->get_gp_data(&flags, &gp_seq); 3438 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 3439 cur_ops->name, (long)gp_seq, flags, 3440 rcutorture_seq_diff(gp_seq, start_gp_seq)); 3441 torture_stop_kthread(rcu_torture_stats, stats_task); 3442 torture_stop_kthread(rcu_torture_fqs, fqs_task); 3443 if (rcu_torture_can_boost() && rcutor_hp >= 0) 3444 cpuhp_remove_state(rcutor_hp); 3445 3446 /* 3447 * Wait for all RCU callbacks to fire, then do torture-type-specific 3448 * cleanup operations. 3449 */ 3450 if (cur_ops->cb_barrier != NULL) { 3451 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3452 cur_ops->cb_barrier(); 3453 } 3454 if (cur_ops->cleanup != NULL) 3455 cur_ops->cleanup(); 3456 3457 rcu_torture_mem_dump_obj(); 3458 3459 rcu_torture_stats_print(); /* -After- the stats thread is stopped!
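 * A final print here reports end-of-test totals without racing the periodic rcu_torture_stats kthread, which was stopped above.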
*/ 3460 3461 if (err_segs_recorded) { 3462 pr_alert("Failure/close-call rcutorture reader segments:\n"); 3463 if (rt_read_nsegs == 0) 3464 pr_alert("\t: No segments recorded!!!\n"); 3465 firsttime = 1; 3466 for (i = 0; i < rt_read_nsegs; i++) { 3467 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 3468 if (err_segs[i].rt_delay_jiffies != 0) { 3469 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 3470 err_segs[i].rt_delay_jiffies); 3471 firsttime = 0; 3472 } 3473 if (err_segs[i].rt_delay_ms != 0) { 3474 pr_cont("%s%ldms", firsttime ? "" : "+", 3475 err_segs[i].rt_delay_ms); 3476 firsttime = 0; 3477 } 3478 if (err_segs[i].rt_delay_us != 0) { 3479 pr_cont("%s%ldus", firsttime ? "" : "+", 3480 err_segs[i].rt_delay_us); 3481 firsttime = 0; 3482 } 3483 pr_cont("%s\n", 3484 err_segs[i].rt_preempted ? "preempted" : ""); 3485 3486 } 3487 } 3488 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 3489 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 3490 else if (torture_onoff_failures()) 3491 rcu_torture_print_module_parms(cur_ops, 3492 "End of test: RCU_HOTPLUG"); 3493 else 3494 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 3495 torture_cleanup_end(); 3496 if (cur_ops->gp_slow_unregister) 3497 cur_ops->gp_slow_unregister(NULL); 3498 } 3499 3500 static void rcu_torture_leak_cb(struct rcu_head *rhp) 3501 { 3502 } 3503 3504 static void rcu_torture_err_cb(struct rcu_head *rhp) 3505 { 3506 /* 3507 * This -might- happen due to race conditions, but is unlikely. 3508 * The scenario that leads to this happening is that the 3509 * first of the pair of duplicate callbacks is queued, 3510 * someone else starts a grace period that includes that 3511 * callback, then the second of the pair must wait for the 3512 * next grace period. Unlikely, but can happen. If it 3513 * does happen, the debug-objects subsystem won't have splatted. 3514 */ 3515 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 3516 } 3517 3518 /* 3519 * Verify that double-free causes debug-objects to complain, but only 3520 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 3521 * cannot be carried out. 3522 */ 3523 static void rcu_test_debug_objects(void) 3524 { 3525 struct rcu_head rh1; 3526 struct rcu_head rh2; 3527 int idx; 3528 3529 if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) { 3530 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n", 3531 KBUILD_MODNAME, cur_ops->name); 3532 return; 3533 } 3534 3535 if (WARN_ON_ONCE(cur_ops->debug_objects && 3536 (!cur_ops->call || !cur_ops->cb_barrier))) 3537 return; 3538 3539 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 3540 3541 init_rcu_head_on_stack(&rh1); 3542 init_rcu_head_on_stack(&rh2); 3543 pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name); 3544 3545 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 3546 idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */ 3547 cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 3548 cur_ops->call(&rh2, rcu_torture_leak_cb); 3549 cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 3550 if (rhp) { 3551 cur_ops->call(rhp, rcu_torture_leak_cb); 3552 cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 3553 } 3554 cur_ops->readunlock(idx); 3555 3556 /* Wait for them all to get done so we can safely return. 
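 * The cb_barrier() below waits for the callbacks queued above, including the intentional duplicates, so the on-stack rh1 and rh2 are no longer referenced once this function returns.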
*/ 3557 cur_ops->cb_barrier(); 3558 pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name); 3559 destroy_rcu_head_on_stack(&rh1); 3560 destroy_rcu_head_on_stack(&rh2); 3561 kfree(rhp); 3562 } 3563 3564 static void rcutorture_sync(void) 3565 { 3566 static unsigned long n; 3567 3568 if (cur_ops->sync && !(++n & 0xfff)) 3569 cur_ops->sync(); 3570 } 3571 3572 static DEFINE_MUTEX(mut0); 3573 static DEFINE_MUTEX(mut1); 3574 static DEFINE_MUTEX(mut2); 3575 static DEFINE_MUTEX(mut3); 3576 static DEFINE_MUTEX(mut4); 3577 static DEFINE_MUTEX(mut5); 3578 static DEFINE_MUTEX(mut6); 3579 static DEFINE_MUTEX(mut7); 3580 static DEFINE_MUTEX(mut8); 3581 static DEFINE_MUTEX(mut9); 3582 3583 static DECLARE_RWSEM(rwsem0); 3584 static DECLARE_RWSEM(rwsem1); 3585 static DECLARE_RWSEM(rwsem2); 3586 static DECLARE_RWSEM(rwsem3); 3587 static DECLARE_RWSEM(rwsem4); 3588 static DECLARE_RWSEM(rwsem5); 3589 static DECLARE_RWSEM(rwsem6); 3590 static DECLARE_RWSEM(rwsem7); 3591 static DECLARE_RWSEM(rwsem8); 3592 static DECLARE_RWSEM(rwsem9); 3593 3594 DEFINE_STATIC_SRCU(srcu0); 3595 DEFINE_STATIC_SRCU(srcu1); 3596 DEFINE_STATIC_SRCU(srcu2); 3597 DEFINE_STATIC_SRCU(srcu3); 3598 DEFINE_STATIC_SRCU(srcu4); 3599 DEFINE_STATIC_SRCU(srcu5); 3600 DEFINE_STATIC_SRCU(srcu6); 3601 DEFINE_STATIC_SRCU(srcu7); 3602 DEFINE_STATIC_SRCU(srcu8); 3603 DEFINE_STATIC_SRCU(srcu9); 3604 3605 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, 3606 int cyclelen, int deadlock) 3607 { 3608 int j = i + 1; 3609 3610 if (j >= cyclelen) 3611 j = deadlock ? 0 : -1; 3612 if (j >= 0) 3613 pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i); 3614 else 3615 pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i); 3616 return j; 3617 } 3618 3619 // Test lockdep on SRCU-based deadlock scenarios. 3620 static void rcu_torture_init_srcu_lockdep(void) 3621 { 3622 int cyclelen; 3623 int deadlock; 3624 bool err = false; 3625 int i; 3626 int j; 3627 int idx; 3628 struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, 3629 &mut5, &mut6, &mut7, &mut8, &mut9 }; 3630 struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, 3631 &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; 3632 struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, 3633 &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; 3634 int testtype; 3635 3636 if (!test_srcu_lockdep) 3637 return; 3638 3639 deadlock = test_srcu_lockdep / 1000; 3640 testtype = (test_srcu_lockdep / 10) % 100; 3641 cyclelen = test_srcu_lockdep % 10; 3642 WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); 3643 if (WARN_ONCE(deadlock != !!deadlock, 3644 "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n", 3645 __func__, test_srcu_lockdep, deadlock)) 3646 err = true; 3647 if (WARN_ONCE(cyclelen <= 0, 3648 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", 3649 __func__, test_srcu_lockdep, cyclelen)) 3650 err = true; 3651 if (err) 3652 goto err_out; 3653 3654 if (testtype == 0) { 3655 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", 3656 __func__, test_srcu_lockdep, cyclelen, deadlock ? 
"" : "non-"); 3657 if (deadlock && cyclelen == 1) 3658 pr_info("%s: Expect hang.\n", __func__); 3659 for (i = 0; i < cyclelen; i++) { 3660 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 3661 "srcu_read_unlock", i, cyclelen, deadlock); 3662 idx = srcu_read_lock(srcus[i]); 3663 if (j >= 0) 3664 synchronize_srcu(srcus[j]); 3665 srcu_read_unlock(srcus[i], idx); 3666 } 3667 return; 3668 } 3669 3670 if (testtype == 1) { 3671 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 3672 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3673 for (i = 0; i < cyclelen; i++) { 3674 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 3675 __func__, i, i, i, i); 3676 idx = srcu_read_lock(srcus[i]); 3677 mutex_lock(muts[i]); 3678 mutex_unlock(muts[i]); 3679 srcu_read_unlock(srcus[i], idx); 3680 3681 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 3682 "mutex_unlock", i, cyclelen, deadlock); 3683 mutex_lock(muts[i]); 3684 if (j >= 0) 3685 synchronize_srcu(srcus[j]); 3686 mutex_unlock(muts[i]); 3687 } 3688 return; 3689 } 3690 3691 if (testtype == 2) { 3692 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 3693 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3694 for (i = 0; i < cyclelen; i++) { 3695 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 3696 __func__, i, i, i, i); 3697 idx = srcu_read_lock(srcus[i]); 3698 down_read(rwsems[i]); 3699 up_read(rwsems[i]); 3700 srcu_read_unlock(srcus[i], idx); 3701 3702 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 3703 "up_write", i, cyclelen, deadlock); 3704 down_write(rwsems[i]); 3705 if (j >= 0) 3706 synchronize_srcu(srcus[j]); 3707 up_write(rwsems[i]); 3708 } 3709 return; 3710 } 3711 3712 #ifdef CONFIG_TASKS_TRACE_RCU 3713 if (testtype == 3) { 3714 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 3715 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3716 if (deadlock && cyclelen == 1) 3717 pr_info("%s: Expect hang.\n", __func__); 3718 for (i = 0; i < cyclelen; i++) { 3719 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 3720 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 3721 : "synchronize_srcu"; 3722 char *fu = i == 0 ? 
"rcu_read_unlock_trace" : "srcu_read_unlock"; 3723 3724 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 3725 if (i == 0) 3726 rcu_read_lock_trace(); 3727 else 3728 idx = srcu_read_lock(srcus[i]); 3729 if (j >= 0) { 3730 if (i == cyclelen - 1) 3731 synchronize_rcu_tasks_trace(); 3732 else 3733 synchronize_srcu(srcus[j]); 3734 } 3735 if (i == 0) 3736 rcu_read_unlock_trace(); 3737 else 3738 srcu_read_unlock(srcus[i], idx); 3739 } 3740 return; 3741 } 3742 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 3743 3744 err_out: 3745 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 3746 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 3747 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 3748 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 3749 pr_info("%s: L: Cycle length.\n", __func__); 3750 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 3751 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 3752 } 3753 3754 static int __init 3755 rcu_torture_init(void) 3756 { 3757 long i; 3758 int cpu; 3759 int firsterr = 0; 3760 int flags = 0; 3761 unsigned long gp_seq = 0; 3762 static struct rcu_torture_ops *torture_ops[] = { 3763 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 3764 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 3765 &trivial_ops, 3766 }; 3767 3768 if (!torture_init_begin(torture_type, verbose)) 3769 return -EBUSY; 3770 3771 /* Process args and tell the world that the torturer is on the job. */ 3772 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 3773 cur_ops = torture_ops[i]; 3774 if (strcmp(torture_type, cur_ops->name) == 0) 3775 break; 3776 } 3777 if (i == ARRAY_SIZE(torture_ops)) { 3778 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 3779 torture_type); 3780 pr_alert("rcu-torture types:"); 3781 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 3782 pr_cont(" %s", torture_ops[i]->name); 3783 pr_cont("\n"); 3784 firsterr = -EINVAL; 3785 cur_ops = NULL; 3786 goto unwind; 3787 } 3788 if (cur_ops->fqs == NULL && fqs_duration != 0) { 3789 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 3790 fqs_duration = 0; 3791 } 3792 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 3793 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3794 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 3795 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 3796 nocbs_nthreads = 0; 3797 } 3798 if (cur_ops->init) 3799 cur_ops->init(); 3800 3801 rcu_torture_init_srcu_lockdep(); 3802 3803 if (nreaders >= 0) { 3804 nrealreaders = nreaders; 3805 } else { 3806 nrealreaders = num_online_cpus() - 2 - nreaders; 3807 if (nrealreaders <= 0) 3808 nrealreaders = 1; 3809 } 3810 rcu_torture_print_module_parms(cur_ops, "Start of test"); 3811 if (cur_ops->get_gp_data) 3812 cur_ops->get_gp_data(&flags, &gp_seq); 3813 start_gp_seq = gp_seq; 3814 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 3815 cur_ops->name, (long)gp_seq, flags); 3816 3817 /* Set up the freelist. */ 3818 3819 INIT_LIST_HEAD(&rcu_torture_freelist); 3820 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 3821 rcu_tortures[i].rtort_mbtest = 0; 3822 list_add_tail(&rcu_tortures[i].rtort_free, 3823 &rcu_torture_freelist); 3824 } 3825 3826 /* Initialize the statistics so that each run gets its own numbers. 
*/ 3827 3828 rcu_torture_current = NULL; 3829 rcu_torture_current_version = 0; 3830 atomic_set(&n_rcu_torture_alloc, 0); 3831 atomic_set(&n_rcu_torture_alloc_fail, 0); 3832 atomic_set(&n_rcu_torture_free, 0); 3833 atomic_set(&n_rcu_torture_mberror, 0); 3834 atomic_set(&n_rcu_torture_mbchk_fail, 0); 3835 atomic_set(&n_rcu_torture_mbchk_tries, 0); 3836 atomic_set(&n_rcu_torture_error, 0); 3837 n_rcu_torture_barrier_error = 0; 3838 n_rcu_torture_boost_ktrerror = 0; 3839 n_rcu_torture_boost_failure = 0; 3840 n_rcu_torture_boosts = 0; 3841 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 3842 atomic_set(&rcu_torture_wcount[i], 0); 3843 for_each_possible_cpu(cpu) { 3844 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 3845 per_cpu(rcu_torture_count, cpu)[i] = 0; 3846 per_cpu(rcu_torture_batch, cpu)[i] = 0; 3847 } 3848 } 3849 err_segs_recorded = 0; 3850 rt_read_nsegs = 0; 3851 3852 /* Start up the kthreads. */ 3853 3854 rcu_torture_write_types(); 3855 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 3856 writer_task); 3857 if (torture_init_error(firsterr)) 3858 goto unwind; 3859 if (nfakewriters > 0) { 3860 fakewriter_tasks = kcalloc(nfakewriters, 3861 sizeof(fakewriter_tasks[0]), 3862 GFP_KERNEL); 3863 if (fakewriter_tasks == NULL) { 3864 TOROUT_ERRSTRING("out of memory"); 3865 firsterr = -ENOMEM; 3866 goto unwind; 3867 } 3868 } 3869 for (i = 0; i < nfakewriters; i++) { 3870 firsterr = torture_create_kthread(rcu_torture_fakewriter, 3871 NULL, fakewriter_tasks[i]); 3872 if (torture_init_error(firsterr)) 3873 goto unwind; 3874 } 3875 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 3876 GFP_KERNEL); 3877 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 3878 GFP_KERNEL); 3879 if (!reader_tasks || !rcu_torture_reader_mbchk) { 3880 TOROUT_ERRSTRING("out of memory"); 3881 firsterr = -ENOMEM; 3882 goto unwind; 3883 } 3884 for (i = 0; i < nrealreaders; i++) { 3885 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 3886 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 3887 reader_tasks[i]); 3888 if (torture_init_error(firsterr)) 3889 goto unwind; 3890 } 3891 nrealnocbers = nocbs_nthreads; 3892 if (WARN_ON(nrealnocbers < 0)) 3893 nrealnocbers = 1; 3894 if (WARN_ON(nocbs_toggle < 0)) 3895 nocbs_toggle = HZ; 3896 if (nrealnocbers > 0) { 3897 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 3898 if (nocb_tasks == NULL) { 3899 TOROUT_ERRSTRING("out of memory"); 3900 firsterr = -ENOMEM; 3901 goto unwind; 3902 } 3903 } else { 3904 nocb_tasks = NULL; 3905 } 3906 for (i = 0; i < nrealnocbers; i++) { 3907 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 3908 if (torture_init_error(firsterr)) 3909 goto unwind; 3910 } 3911 if (stat_interval > 0) { 3912 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 3913 stats_task); 3914 if (torture_init_error(firsterr)) 3915 goto unwind; 3916 } 3917 if (test_no_idle_hz && shuffle_interval > 0) { 3918 firsterr = torture_shuffle_init(shuffle_interval * HZ); 3919 if (torture_init_error(firsterr)) 3920 goto unwind; 3921 } 3922 if (stutter < 0) 3923 stutter = 0; 3924 if (stutter) { 3925 int t; 3926 3927 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 3928 firsterr = torture_stutter_init(stutter * HZ, t); 3929 if (torture_init_error(firsterr)) 3930 goto unwind; 3931 } 3932 if (fqs_duration < 0) 3933 fqs_duration = 0; 3934 if (fqs_holdoff < 0) 3935 fqs_holdoff = 0; 3936 if (fqs_duration && fqs_holdoff) { 3937 /* Create the fqs thread */ 3938 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 3939 fqs_task); 3940 if (torture_init_error(firsterr)) 3941 goto unwind; 3942 } 3943 if (test_boost_interval < 1) 3944 test_boost_interval = 1; 3945 if (test_boost_duration < 2) 3946 test_boost_duration = 2; 3947 if (rcu_torture_can_boost()) { 3948 3949 boost_starttime = jiffies + test_boost_interval * HZ; 3950 3951 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 3952 rcutorture_booster_init, 3953 rcutorture_booster_cleanup); 3954 rcutor_hp = firsterr; 3955 if (torture_init_error(firsterr)) 3956 goto unwind; 3957 } 3958 shutdown_jiffies = jiffies + shutdown_secs * HZ; 3959 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 3960 if (torture_init_error(firsterr)) 3961 goto unwind; 3962 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 3963 rcutorture_sync); 3964 if (torture_init_error(firsterr)) 3965 goto unwind; 3966 firsterr = rcu_torture_stall_init(); 3967 if (torture_init_error(firsterr)) 3968 goto unwind; 3969 firsterr = rcu_torture_fwd_prog_init(); 3970 if (torture_init_error(firsterr)) 3971 goto unwind; 3972 firsterr = rcu_torture_barrier_init(); 3973 if (torture_init_error(firsterr)) 3974 goto unwind; 3975 firsterr = rcu_torture_read_exit_init(); 3976 if (torture_init_error(firsterr)) 3977 goto unwind; 3978 if (object_debug) 3979 rcu_test_debug_objects(); 3980 torture_init_end(); 3981 if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) 3982 cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); 3983 return 0; 3984 3985 unwind: 3986 torture_init_end(); 3987 rcu_torture_cleanup(); 3988 if (shutdown_secs) { 3989 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 3990 kernel_power_off(); 3991 } 3992 return firsterr; 3993 } 3994 3995 module_init(rcu_torture_init); 3996 module_exit(rcu_torture_cleanup); 3997
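/*
 * Example invocation when built as a module (illustrative values only;
 * see Documentation/RCU/torture.rst for the full parameter list):
 *
 *	modprobe rcutorture torture_type=srcu n_barrier_cbs=4 test_srcu_lockdep=1013
 *
 * This selects srcu_ops from torture_ops[], spawns four
 * rcu_torture_barrier_cbs() kthreads, and decodes test_srcu_lockdep as
 * D=1, NN=01, L=3 in rcu_torture_init_srcu_lockdep(), that is, a
 * three-way SRCU/mutex deadlock cycle for lockdep to flag.
 */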