// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
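/*
 * For example, extendables=0x05 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT)
 * permits readers to be extended by disabling bottom halves and/or by
 * disabling preemption, but not by disabling interrupts or by nesting in
 * the _bh/_sched reader flavors.  This is only an illustration of how the
 * above bits combine; the default remains RCUTORTURE_MAX_EXTEND.
 */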
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
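/*
 * For example (illustrative values only), these parameters may be set at
 * module-load time, as in "modprobe rcutorture torture_type=srcu
 * nreaders=8 stat_interval=30", or on the kernel command line for the
 * built-in case, as in "rcutorture.torture_type=srcu
 * rcutorture.shutdown_secs=60".
 */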
static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}
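/*
 * For example, a run with rcutorture.shutdown_secs=300 will have these
 * CPU-hog tests stand down during roughly the final 30 seconds before
 * the scheduled shutdown, shutdown_jiffies having been derived from
 * shutdown_secs at initialization time.
 */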
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_gp_completed)(void);
	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
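/*
 * Each torture type below supplies one of the above structures.  The
 * rest of this file invokes only those hooks that the selected type
 * actually provides, so optional functionality (polled, conditional,
 * or expedited grace periods, statistics, and so on) is simply skipped
 * for types that leave the corresponding pointer NULL.
 */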
/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.same_gp_state		= same_state_synchronize_rcu,
	.same_gp_state_full	= same_state_synchronize_rcu_full,
	.get_comp_state		= get_completed_synchronize_rcu,
	.get_comp_state_full	= get_completed_synchronize_rcu_full,
	.get_gp_state		= get_state_synchronize_rcu,
	.get_gp_state_full	= get_state_synchronize_rcu_full,
	.get_gp_completed	= get_completed_synchronize_rcu,
	.get_gp_completed_full	= get_completed_synchronize_rcu_full,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
	.poll_need_2gp		= rcu_poll_need_2gp,
	.cond_sync		= cond_synchronize_rcu,
	.cond_sync_full		= cond_synchronize_rcu_full,
	.get_gp_state_exp	= get_state_synchronize_rcu,
	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp	= poll_state_synchronize_rcu,
	.cond_sync_exp		= cond_synchronize_rcu_expedited,
	.call			= call_rcu_hurry,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.get_gp_data		= rcutorture_get_gp_data,
	.gp_slow_register	= rcu_gp_slow_register,
	.gp_slow_unregister	= rcu_gp_slow_unregister,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.debug_objects		= 1,
	.name			= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;
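/*
 * The "srcu" torture type exercises the statically allocated srcu_ctl
 * defined above, while the "srcud" type repoints srcu_ctlp at the
 * dynamically initialized srcu_ctld (see srcu_torture_init() and
 * srcu_torture_cleanup() below), so both allocation styles get coverage.
 */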
static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

static int srcu_torture_read_lock(void)
{
	if (cur_ops == &srcud_ops)
		return srcu_read_lock_nmisafe(srcu_ctlp);
	else
		return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	if (cur_ops == &srcud_ops)
		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
	else
		srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.get_gp_data	= srcu_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.get_gp_data	= srcu_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */
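/*
 * The trivial variant's grace period below works by binding the current
 * task to each online CPU in turn, which forces a context switch on
 * every CPU.  This suffices only because its readers merely disable
 * preemption, hence the CONFIG_PREEMPT=n restriction noted above.
 */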
static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.irq_capable	= 1,
	.name		= "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.get_gp_data	= rcu_tasks_get_gp_data,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.get_gp_data	= rcu_tasks_rude_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.get_gp_data	= rcu_tasks_trace_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible if rcutorture is built-in;
	 * otherwise, the user should do this manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
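/*
 * When rcutorture is instead built as a module, the manual equivalent is,
 * for example, "echo -1 > /proc/sys/kernel/sched_rt_runtime_us" before the
 * run, restoring the previous value (typically 950000) afterwards.
 */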
static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");
		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	unsigned long stallsdone = jiffies;
	bool stutter_waited;
	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];

	// If a new stall test is added, this must be adjusted.
	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ;
	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);

			// Make sure readers block polled grace periods.
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				if (cur_ops->get_gp_completed) {
					cookie = cur_ops->get_gp_completed();
					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
				}
				cur_ops->readunlock(idx);
			}
			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
				idx = cur_ops->readlock();
				cur_ops->get_gp_state_full(&cookie_full);
				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cpumask_pr_args(cpu_online_mask));
				if (cur_ops->get_gp_completed_full) {
					cur_ops->get_gp_completed_full(&cookie_full);
					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
				}
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				do_rtws_sync(&rand, cur_ops->exp_sync);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP:
				rcu_torture_writer_state = RTWS_COND_GET_EXP;
				gp_snap = cur_ops->get_gp_state_exp();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
				cur_ops->cond_sync_exp(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
				cur_ops->cond_sync_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
				cur_ops->cond_sync_exp_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				for (i = 0; i < ARRAY_SIZE(ulo); i++)
					ulo[i] = cur_ops->get_comp_state();
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap)) {
					gp_snap1 = cur_ops->get_gp_state();
					for (i = 0; i < ARRAY_SIZE(ulo); i++)
						if (cur_ops->poll_gp_state(ulo[i]) ||
						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
							ulo[i] = gp_snap1;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
				for (i = 0; i < ARRAY_SIZE(rgo); i++)
					cur_ops->get_comp_state_full(&rgo[i]);
				cur_ops->start_gp_poll_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
					cur_ops->get_gp_state_full(&gp_snap1_full);
					for (i = 0; i < ARRAY_SIZE(rgo); i++)
						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
						    cur_ops->same_gp_state_full(&rgo[i],
										&gp_snap1_full)) {
							rgo[i] = gp_snap1_full;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
				gp_snap = cur_ops->start_gp_poll_exp();
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
				while (!cur_ops->poll_gp_state_exp(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				do_rtws_sync(&rand, cur_ops->sync);
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
*/ 1585 can_expedite = !rcu_gp_is_expedited() && 1586 !rcu_gp_is_normal(); 1587 } 1588 rcu_torture_writer_state = RTWS_STUTTER; 1589 boot_ended = rcu_inkernel_boot_has_ended(); 1590 stutter_waited = stutter_wait("rcu_torture_writer"); 1591 if (stutter_waited && 1592 !atomic_read(&rcu_fwd_cb_nodelay) && 1593 !cur_ops->slow_gps && 1594 !torture_must_stop() && 1595 boot_ended && 1596 time_after(jiffies, stallsdone)) 1597 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1598 if (list_empty(&rcu_tortures[i].rtort_free) && 1599 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { 1600 tracing_off(); 1601 if (cur_ops->gp_kthread_dbg) 1602 cur_ops->gp_kthread_dbg(); 1603 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1604 rcu_ftrace_dump(DUMP_ALL); 1605 } 1606 if (stutter_waited) 1607 sched_set_normal(current, oldnice); 1608 } while (!torture_must_stop()); 1609 rcu_torture_current = NULL; // Let stats task know that we are done. 1610 /* Reset expediting back to unexpedited. */ 1611 if (expediting > 0) 1612 expediting = -expediting; 1613 while (can_expedite && expediting++ < 0) 1614 rcu_unexpedite_gp(); 1615 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1616 if (!can_expedite) 1617 pr_alert("%s" TORTURE_FLAG 1618 " Dynamic grace-period expediting was disabled.\n", 1619 torture_type); 1620 rcu_torture_writer_state = RTWS_STOPPING; 1621 torture_kthread_stopping("rcu_torture_writer"); 1622 return 0; 1623 } 1624 1625 /* 1626 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1627 * delay between calls. 1628 */ 1629 static int 1630 rcu_torture_fakewriter(void *arg) 1631 { 1632 unsigned long gp_snap; 1633 struct rcu_gp_oldstate gp_snap_full; 1634 DEFINE_TORTURE_RANDOM(rand); 1635 1636 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1637 set_user_nice(current, MAX_NICE); 1638 1639 if (WARN_ONCE(nsynctypes == 0, 1640 "%s: No update-side primitives.\n", __func__)) { 1641 /* 1642 * No updates primitives, so don't try updating. 1643 * The resulting test won't be testing much, hence the 1644 * above WARN_ONCE(). 
1645 */ 1646 torture_kthread_stopping("rcu_torture_fakewriter"); 1647 return 0; 1648 } 1649 1650 do { 1651 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1652 if (cur_ops->cb_barrier != NULL && 1653 torture_random(&rand) % (nfakewriters * 8) == 0) { 1654 cur_ops->cb_barrier(); 1655 } else { 1656 switch (synctype[torture_random(&rand) % nsynctypes]) { 1657 case RTWS_DEF_FREE: 1658 break; 1659 case RTWS_EXP_SYNC: 1660 cur_ops->exp_sync(); 1661 break; 1662 case RTWS_COND_GET: 1663 gp_snap = cur_ops->get_gp_state(); 1664 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1665 cur_ops->cond_sync(gp_snap); 1666 break; 1667 case RTWS_COND_GET_EXP: 1668 gp_snap = cur_ops->get_gp_state_exp(); 1669 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1670 cur_ops->cond_sync_exp(gp_snap); 1671 break; 1672 case RTWS_COND_GET_FULL: 1673 cur_ops->get_gp_state_full(&gp_snap_full); 1674 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1675 cur_ops->cond_sync_full(&gp_snap_full); 1676 break; 1677 case RTWS_COND_GET_EXP_FULL: 1678 cur_ops->get_gp_state_full(&gp_snap_full); 1679 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1680 cur_ops->cond_sync_exp_full(&gp_snap_full); 1681 break; 1682 case RTWS_POLL_GET: 1683 gp_snap = cur_ops->start_gp_poll(); 1684 while (!cur_ops->poll_gp_state(gp_snap)) { 1685 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1686 &rand); 1687 } 1688 break; 1689 case RTWS_POLL_GET_FULL: 1690 cur_ops->start_gp_poll_full(&gp_snap_full); 1691 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1692 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1693 &rand); 1694 } 1695 break; 1696 case RTWS_POLL_GET_EXP: 1697 gp_snap = cur_ops->start_gp_poll_exp(); 1698 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1699 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1700 &rand); 1701 } 1702 break; 1703 case RTWS_POLL_GET_EXP_FULL: 1704 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1705 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1706 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1707 &rand); 1708 } 1709 break; 1710 case RTWS_SYNC: 1711 cur_ops->sync(); 1712 break; 1713 default: 1714 WARN_ON_ONCE(1); 1715 break; 1716 } 1717 } 1718 stutter_wait("rcu_torture_fakewriter"); 1719 } while (!torture_must_stop()); 1720 1721 torture_kthread_stopping("rcu_torture_fakewriter"); 1722 return 0; 1723 } 1724 1725 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1726 { 1727 kfree(rhp); 1728 } 1729 1730 // Set up and carry out testing of RCU's global memory ordering 1731 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1732 struct torture_random_state *trsp) 1733 { 1734 unsigned long loops; 1735 int noc = torture_num_online_cpus(); 1736 int rdrchked; 1737 int rdrchker; 1738 struct rcu_torture_reader_check *rtrcp; // Me. 1739 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1740 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1741 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1742 1743 if (myid < 0) 1744 return; // Don't try this from timer handlers. 1745 1746 // Increment my counter. 1747 rtrcp = &rcu_torture_reader_mbchk[myid]; 1748 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1749 1750 // Attempt to assign someone else some checking work. 
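	// One reader is picked at random to be checked (rdrchked) and another
	// to do the checking (rdrchker).  The snapshot of the checked reader's
	// loop count travels to the chosen checker via ->rtc_assigner and to
	// the update side via ->rtort_chkp; once the grace period ends and
	// ->rtc_ready is set, the checker verifies below that the checked
	// reader's loop count has advanced.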
1751 rdrchked = torture_random(trsp) % nrealreaders; 1752 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1753 rdrchker = torture_random(trsp) % nrealreaders; 1754 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1755 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1756 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1757 !READ_ONCE(rtp->rtort_chkp) && 1758 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1759 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1760 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1761 rtrcp->rtc_chkrdr = rdrchked; 1762 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1763 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1764 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1765 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1766 } 1767 1768 // If assigned some completed work, do it! 1769 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1770 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1771 return; // No work or work not yet ready. 1772 rdrchked = rtrcp_assigner->rtc_chkrdr; 1773 if (WARN_ON_ONCE(rdrchked < 0)) 1774 return; 1775 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1776 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1777 atomic_inc(&n_rcu_torture_mbchk_tries); 1778 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1779 atomic_inc(&n_rcu_torture_mbchk_fail); 1780 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1781 rtrcp_assigner->rtc_ready = 0; 1782 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1783 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1784 } 1785 1786 /* 1787 * Do one extension of an RCU read-side critical section using the 1788 * current reader state in readstate (set to zero for initial entry 1789 * to extended critical section), set the new state as specified by 1790 * newstate (set to zero for final exit from extended critical section), 1791 * and random-number-generator state in trsp. If this is neither the 1792 * beginning or end of the critical section and if there was actually a 1793 * change, do a ->read_delay(). 1794 */ 1795 static void rcutorture_one_extend(int *readstate, int newstate, 1796 struct torture_random_state *trsp, 1797 struct rt_read_seg *rtrsp) 1798 { 1799 unsigned long flags; 1800 int idxnew1 = -1; 1801 int idxnew2 = -1; 1802 int idxold1 = *readstate; 1803 int idxold2 = idxold1; 1804 int statesnew = ~*readstate & newstate; 1805 int statesold = *readstate & ~newstate; 1806 1807 WARN_ON_ONCE(idxold2 < 0); 1808 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1809 rtrsp->rt_readstate = newstate; 1810 1811 /* First, put new protection in place to avoid critical-section gap. 
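	 * That is, all requested new protections are acquired before any old
	 * ones are released below, so the reader never goes momentarily
	 * unprotected while its state changes.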
	 */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU_1)
		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
	if (statesnew & RCUTORTURE_RDR_RCU_2)
		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;

	/*
	 * Next, remove old protection, in decreasing order of strength
	 * to avoid unlock paths that aren't safe in the stronger
	 * context. Namely: BH can not be enabled with disabled interrupts.
	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
	 * context.
	 */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_RCU_2) {
		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
		WARN_ON_ONCE(idxnew2 != -1);
		idxold2 = 0;
	}
	if (statesold & RCUTORTURE_RDR_RCU_1) {
		bool lockit;

		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
		WARN_ON_ONCE(idxnew1 != -1);
		idxold1 = 0;
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew1 == -1)
		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
	WARN_ON_ONCE(idxnew1 < 0);
	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
	if (idxnew2 == -1)
		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
	WARN_ON_ONCE(idxnew2 < 0);
	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	*readstate = idxnew1 | idxnew2 | newstate;
	WARN_ON_ONCE(*readstate < 0);
	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set.
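 * For example, the result might be RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_BH,
 * asking for a cur_ops->readlock() reader with BH also disabled.  The ?:
 * fallback at the end of the function guarantees at least RCUTORTURE_RDR_RCU_1.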
*/ 1894 static int 1895 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1896 { 1897 int mask = rcutorture_extend_mask_max(); 1898 unsigned long randmask1 = torture_random(trsp); 1899 unsigned long randmask2 = randmask1 >> 3; 1900 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 1901 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 1902 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1903 1904 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); 1905 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1906 if (!(randmask1 & 0x7)) 1907 mask = mask & randmask2; 1908 else 1909 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1910 1911 // Can't have nested RCU reader without outer RCU reader. 1912 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 1913 if (oldmask & RCUTORTURE_RDR_RCU_1) 1914 mask &= ~RCUTORTURE_RDR_RCU_2; 1915 else 1916 mask |= RCUTORTURE_RDR_RCU_1; 1917 } 1918 1919 /* 1920 * Can't enable bh w/irq disabled. 1921 */ 1922 if (mask & RCUTORTURE_RDR_IRQ) 1923 mask |= oldmask & bhs; 1924 1925 /* 1926 * Ideally these sequences would be detected in debug builds 1927 * (regardless of RT), but until then don't stop testing 1928 * them on non-RT. 1929 */ 1930 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1931 /* Can't modify BH in atomic context */ 1932 if (oldmask & preempts_irq) 1933 mask &= ~bhs; 1934 if ((oldmask | mask) & preempts_irq) 1935 mask |= oldmask & bhs; 1936 } 1937 1938 return mask ?: RCUTORTURE_RDR_RCU_1; 1939 } 1940 1941 /* 1942 * Do a randomly selected number of extensions of an existing RCU read-side 1943 * critical section. 1944 */ 1945 static struct rt_read_seg * 1946 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1947 struct rt_read_seg *rtrsp) 1948 { 1949 int i; 1950 int j; 1951 int mask = rcutorture_extend_mask_max(); 1952 1953 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1954 if (!((mask - 1) & mask)) 1955 return rtrsp; /* Current RCU reader not extendable. */ 1956 /* Bias towards larger numbers of loops. */ 1957 i = torture_random(trsp); 1958 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1959 for (j = 0; j < i; j++) { 1960 mask = rcutorture_extend_mask(*readstate, trsp); 1961 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1962 } 1963 return &rtrsp[j]; 1964 } 1965 1966 /* 1967 * Do one read-side critical section, returning false if there was 1968 * no data to read. Can be invoked both from process context and 1969 * from a timer handler. 
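 * When polled grace-period interfaces are available, this function also
 * occasionally (see checkpolling) samples grace-period cookies on entry and
 * complains if a sampled grace period is seen as having completed before the
 * reader has finished.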
1970 */ 1971 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1972 { 1973 bool checkpolling = !(torture_random(trsp) & 0xfff); 1974 unsigned long cookie; 1975 struct rcu_gp_oldstate cookie_full; 1976 int i; 1977 unsigned long started; 1978 unsigned long completed; 1979 int newstate; 1980 struct rcu_torture *p; 1981 int pipe_count; 1982 int readstate = 0; 1983 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1984 struct rt_read_seg *rtrsp = &rtseg[0]; 1985 struct rt_read_seg *rtrsp1; 1986 unsigned long long ts; 1987 1988 WARN_ON_ONCE(!rcu_is_watching()); 1989 newstate = rcutorture_extend_mask(readstate, trsp); 1990 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1991 if (checkpolling) { 1992 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1993 cookie = cur_ops->get_gp_state(); 1994 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 1995 cur_ops->get_gp_state_full(&cookie_full); 1996 } 1997 started = cur_ops->get_gp_seq(); 1998 ts = rcu_trace_clock_local(); 1999 p = rcu_dereference_check(rcu_torture_current, 2000 !cur_ops->readlock_held || cur_ops->readlock_held()); 2001 if (p == NULL) { 2002 /* Wait for rcu_torture_writer to get underway */ 2003 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 2004 return false; 2005 } 2006 if (p->rtort_mbtest == 0) 2007 atomic_inc(&n_rcu_torture_mberror); 2008 rcu_torture_reader_do_mbchk(myid, p, trsp); 2009 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 2010 preempt_disable(); 2011 pipe_count = READ_ONCE(p->rtort_pipe_count); 2012 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2013 // Should not happen in a correct RCU implementation, 2014 // happens quite often for torture_type=busted. 2015 pipe_count = RCU_TORTURE_PIPE_LEN; 2016 } 2017 completed = cur_ops->get_gp_seq(); 2018 if (pipe_count > 1) { 2019 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 2020 ts, started, completed); 2021 rcu_ftrace_dump(DUMP_ALL); 2022 } 2023 __this_cpu_inc(rcu_torture_count[pipe_count]); 2024 completed = rcutorture_seq_diff(completed, started); 2025 if (completed > RCU_TORTURE_PIPE_LEN) { 2026 /* Should not happen, but... */ 2027 completed = RCU_TORTURE_PIPE_LEN; 2028 } 2029 __this_cpu_inc(rcu_torture_batch[completed]); 2030 preempt_enable(); 2031 if (checkpolling) { 2032 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2033 WARN_ONCE(cur_ops->poll_gp_state(cookie), 2034 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2035 __func__, 2036 rcu_torture_writer_state_getname(), 2037 rcu_torture_writer_state, 2038 cookie, cur_ops->get_gp_state()); 2039 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2040 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 2041 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2042 __func__, 2043 rcu_torture_writer_state_getname(), 2044 rcu_torture_writer_state, 2045 cpumask_pr_args(cpu_online_mask)); 2046 } 2047 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 2048 WARN_ON_ONCE(readstate); 2049 // This next splat is expected behavior if leakpointer, especially 2050 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2051 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 2052 2053 /* If error or close call, record the sequence of reader protections. 
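	 * Only the first such event is recorded (note the xchg() on
	 * err_segs_recorded); the recorded segments are printed at cleanup time.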
*/ 2054 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2055 i = 0; 2056 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 2057 err_segs[i++] = *rtrsp1; 2058 rt_read_nsegs = i; 2059 } 2060 2061 return true; 2062 } 2063 2064 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2065 2066 /* 2067 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2068 * incrementing the corresponding element of the pipeline array. The 2069 * counter in the element should never be greater than 1, otherwise, the 2070 * RCU implementation is broken. 2071 */ 2072 static void rcu_torture_timer(struct timer_list *unused) 2073 { 2074 atomic_long_inc(&n_rcu_torture_timers); 2075 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2076 2077 /* Test call_rcu() invocation from interrupt handler. */ 2078 if (cur_ops->call) { 2079 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2080 2081 if (rhp) 2082 cur_ops->call(rhp, rcu_torture_timer_cb); 2083 } 2084 } 2085 2086 /* 2087 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2088 * incrementing the corresponding element of the pipeline array. The 2089 * counter in the element should never be greater than 1, otherwise, the 2090 * RCU implementation is broken. 2091 */ 2092 static int 2093 rcu_torture_reader(void *arg) 2094 { 2095 unsigned long lastsleep = jiffies; 2096 long myid = (long)arg; 2097 int mynumonline = myid; 2098 DEFINE_TORTURE_RANDOM(rand); 2099 struct timer_list t; 2100 2101 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2102 set_user_nice(current, MAX_NICE); 2103 if (irqreader && cur_ops->irq_capable) 2104 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2105 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2106 do { 2107 if (irqreader && cur_ops->irq_capable) { 2108 if (!timer_pending(&t)) 2109 mod_timer(&t, jiffies + 1); 2110 } 2111 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2112 schedule_timeout_interruptible(HZ); 2113 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2114 torture_hrtimeout_us(500, 1000, &rand); 2115 lastsleep = jiffies + 10; 2116 } 2117 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2118 schedule_timeout_interruptible(HZ / 5); 2119 stutter_wait("rcu_torture_reader"); 2120 } while (!torture_must_stop()); 2121 if (irqreader && cur_ops->irq_capable) { 2122 del_timer_sync(&t); 2123 destroy_timer_on_stack(&t); 2124 } 2125 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2126 torture_kthread_stopping("rcu_torture_reader"); 2127 return 0; 2128 } 2129 2130 /* 2131 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2132 * increase race probabilities and fuzzes the interval between toggling. 
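 * The number of toggle kthreads and the toggle interval come from the
 * nocbs_nthreads and nocbs_toggle module parameters; one possible invocation
 * (illustrative only) would be "modprobe rcutorture nocbs_nthreads=2
 * nocbs_toggle=500" on a kernel built with CONFIG_RCU_NOCB_CPU=y.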
2133 */ 2134 static int rcu_nocb_toggle(void *arg) 2135 { 2136 int cpu; 2137 int maxcpu = -1; 2138 int oldnice = task_nice(current); 2139 long r; 2140 DEFINE_TORTURE_RANDOM(rand); 2141 ktime_t toggle_delay; 2142 unsigned long toggle_fuzz; 2143 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2144 2145 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2146 while (!rcu_inkernel_boot_has_ended()) 2147 schedule_timeout_interruptible(HZ / 10); 2148 for_each_possible_cpu(cpu) 2149 maxcpu = cpu; 2150 WARN_ON(maxcpu < 0); 2151 if (toggle_interval > ULONG_MAX) 2152 toggle_fuzz = ULONG_MAX >> 3; 2153 else 2154 toggle_fuzz = toggle_interval >> 3; 2155 if (toggle_fuzz <= 0) 2156 toggle_fuzz = NSEC_PER_USEC; 2157 do { 2158 r = torture_random(&rand); 2159 cpu = (r >> 1) % (maxcpu + 1); 2160 if (r & 0x1) { 2161 rcu_nocb_cpu_offload(cpu); 2162 atomic_long_inc(&n_nocb_offload); 2163 } else { 2164 rcu_nocb_cpu_deoffload(cpu); 2165 atomic_long_inc(&n_nocb_deoffload); 2166 } 2167 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2168 set_current_state(TASK_INTERRUPTIBLE); 2169 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2170 if (stutter_wait("rcu_nocb_toggle")) 2171 sched_set_normal(current, oldnice); 2172 } while (!torture_must_stop()); 2173 torture_kthread_stopping("rcu_nocb_toggle"); 2174 return 0; 2175 } 2176 2177 /* 2178 * Print torture statistics. Caller must ensure that there is only 2179 * one call to this function at a given time!!! This is normally 2180 * accomplished by relying on the module system to only have one copy 2181 * of the module loaded, and then by giving the rcu_torture_stats 2182 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2183 * thread is not running). 2184 */ 2185 static void 2186 rcu_torture_stats_print(void) 2187 { 2188 int cpu; 2189 int i; 2190 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2191 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2192 struct rcu_torture *rtcp; 2193 static unsigned long rtcv_snap = ULONG_MAX; 2194 static bool splatted; 2195 struct task_struct *wtp; 2196 2197 for_each_possible_cpu(cpu) { 2198 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2199 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2200 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2201 } 2202 } 2203 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2204 if (pipesummary[i] != 0) 2205 break; 2206 } 2207 2208 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2209 rtcp = rcu_access_pointer(rcu_torture_current); 2210 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2211 rtcp, 2212 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2213 rcu_torture_current_version, 2214 list_empty(&rcu_torture_freelist), 2215 atomic_read(&n_rcu_torture_alloc), 2216 atomic_read(&n_rcu_torture_alloc_fail), 2217 atomic_read(&n_rcu_torture_free)); 2218 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2219 atomic_read(&n_rcu_torture_mberror), 2220 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2221 n_rcu_torture_barrier_error, 2222 n_rcu_torture_boost_ktrerror); 2223 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2224 n_rcu_torture_boost_failure, 2225 n_rcu_torture_boosts, 2226 atomic_long_read(&n_rcu_torture_timers)); 2227 torture_onoff_stats(); 2228 pr_cont("barrier: %ld/%ld:%ld ", 2229 data_race(n_barrier_successes), 2230 data_race(n_barrier_attempts), 2231 data_race(n_rcu_torture_barrier_error)); 2232 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2233 pr_cont("nocb-toggles: %ld:%ld\n", 2234 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2235 2236 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2237 if (atomic_read(&n_rcu_torture_mberror) || 2238 atomic_read(&n_rcu_torture_mbchk_fail) || 2239 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2240 n_rcu_torture_boost_failure || i > 1) { 2241 pr_cont("%s", "!!! "); 2242 atomic_inc(&n_rcu_torture_error); 2243 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2244 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2245 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2246 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2247 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2248 WARN_ON_ONCE(i > 1); // Too-short grace period 2249 } 2250 pr_cont("Reader Pipe: "); 2251 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2252 pr_cont(" %ld", pipesummary[i]); 2253 pr_cont("\n"); 2254 2255 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2256 pr_cont("Reader Batch: "); 2257 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2258 pr_cont(" %ld", batchsummary[i]); 2259 pr_cont("\n"); 2260 2261 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2262 pr_cont("Free-Block Circulation: "); 2263 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2264 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2265 } 2266 pr_cont("\n"); 2267 2268 if (cur_ops->stats) 2269 cur_ops->stats(); 2270 if (rtcv_snap == rcu_torture_current_version && 2271 rcu_access_pointer(rcu_torture_current) && 2272 !rcu_stall_is_suppressed()) { 2273 int __maybe_unused flags = 0; 2274 unsigned long __maybe_unused gp_seq = 0; 2275 2276 if (cur_ops->get_gp_data) 2277 cur_ops->get_gp_data(&flags, &gp_seq); 2278 wtp = READ_ONCE(writer_task); 2279 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2280 rcu_torture_writer_state_getname(), 2281 rcu_torture_writer_state, gp_seq, flags, 2282 wtp == NULL ? ~0U : wtp->__state, 2283 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2284 if (!splatted && wtp) { 2285 sched_show_task(wtp); 2286 splatted = true; 2287 } 2288 if (cur_ops->gp_kthread_dbg) 2289 cur_ops->gp_kthread_dbg(); 2290 rcu_ftrace_dump(DUMP_ALL); 2291 } 2292 rtcv_snap = rcu_torture_current_version; 2293 } 2294 2295 /* 2296 * Periodically prints torture statistics, if periodic statistics printing 2297 * was specified via the stat_interval module parameter. 
2298 */ 2299 static int 2300 rcu_torture_stats(void *arg) 2301 { 2302 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2303 do { 2304 schedule_timeout_interruptible(stat_interval * HZ); 2305 rcu_torture_stats_print(); 2306 torture_shutdown_absorb("rcu_torture_stats"); 2307 } while (!torture_must_stop()); 2308 torture_kthread_stopping("rcu_torture_stats"); 2309 return 0; 2310 } 2311 2312 /* Test mem_dump_obj() and friends. */ 2313 static void rcu_torture_mem_dump_obj(void) 2314 { 2315 struct rcu_head *rhp; 2316 struct kmem_cache *kcp; 2317 static int z; 2318 2319 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2320 if (WARN_ON_ONCE(!kcp)) 2321 return; 2322 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2323 if (WARN_ON_ONCE(!rhp)) { 2324 kmem_cache_destroy(kcp); 2325 return; 2326 } 2327 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2328 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2329 mem_dump_obj(ZERO_SIZE_PTR); 2330 pr_alert("mem_dump_obj(NULL):"); 2331 mem_dump_obj(NULL); 2332 pr_alert("mem_dump_obj(%px):", &rhp); 2333 mem_dump_obj(&rhp); 2334 pr_alert("mem_dump_obj(%px):", rhp); 2335 mem_dump_obj(rhp); 2336 pr_alert("mem_dump_obj(%px):", &rhp->func); 2337 mem_dump_obj(&rhp->func); 2338 pr_alert("mem_dump_obj(%px):", &z); 2339 mem_dump_obj(&z); 2340 kmem_cache_free(kcp, rhp); 2341 kmem_cache_destroy(kcp); 2342 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2343 if (WARN_ON_ONCE(!rhp)) 2344 return; 2345 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2346 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2347 mem_dump_obj(rhp); 2348 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2349 mem_dump_obj(&rhp->func); 2350 kfree(rhp); 2351 rhp = vmalloc(4096); 2352 if (WARN_ON_ONCE(!rhp)) 2353 return; 2354 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2355 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2356 mem_dump_obj(rhp); 2357 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2358 mem_dump_obj(&rhp->func); 2359 vfree(rhp); 2360 } 2361 2362 static void 2363 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2364 { 2365 pr_alert("%s" TORTURE_FLAG 2366 "--- %s: nreaders=%d nfakewriters=%d " 2367 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2368 "shuffle_interval=%d stutter=%d irqreader=%d " 2369 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2370 "test_boost=%d/%d test_boost_interval=%d " 2371 "test_boost_duration=%d shutdown_secs=%d " 2372 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2373 "stall_cpu_block=%d " 2374 "n_barrier_cbs=%d " 2375 "onoff_interval=%d onoff_holdoff=%d " 2376 "read_exit_delay=%d read_exit_burst=%d " 2377 "nocbs_nthreads=%d nocbs_toggle=%d " 2378 "test_nmis=%d\n", 2379 torture_type, tag, nrealreaders, nfakewriters, 2380 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2381 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2382 test_boost, cur_ops->can_boost, 2383 test_boost_interval, test_boost_duration, shutdown_secs, 2384 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2385 stall_cpu_block, 2386 n_barrier_cbs, 2387 onoff_interval, onoff_holdoff, 2388 read_exit_delay, read_exit_burst, 2389 nocbs_nthreads, nocbs_toggle, 2390 test_nmis); 2391 } 2392 2393 static int rcutorture_booster_cleanup(unsigned int cpu) 2394 { 2395 struct task_struct *t; 2396 2397 if (boost_tasks[cpu] == NULL) 
2398 return 0; 2399 mutex_lock(&boost_mutex); 2400 t = boost_tasks[cpu]; 2401 boost_tasks[cpu] = NULL; 2402 rcu_torture_enable_rt_throttle(); 2403 mutex_unlock(&boost_mutex); 2404 2405 /* This must be outside of the mutex, otherwise deadlock! */ 2406 torture_stop_kthread(rcu_torture_boost, t); 2407 return 0; 2408 } 2409 2410 static int rcutorture_booster_init(unsigned int cpu) 2411 { 2412 int retval; 2413 2414 if (boost_tasks[cpu] != NULL) 2415 return 0; /* Already created, nothing more to do. */ 2416 2417 // Testing RCU priority boosting requires rcutorture do 2418 // some serious abuse. Counter this by running ksoftirqd 2419 // at higher priority. 2420 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2421 struct sched_param sp; 2422 struct task_struct *t; 2423 2424 t = per_cpu(ksoftirqd, cpu); 2425 WARN_ON_ONCE(!t); 2426 sp.sched_priority = 2; 2427 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2428 } 2429 2430 /* Don't allow time recalculation while creating a new task. */ 2431 mutex_lock(&boost_mutex); 2432 rcu_torture_disable_rt_throttle(); 2433 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2434 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2435 cpu, "rcu_torture_boost_%u"); 2436 if (IS_ERR(boost_tasks[cpu])) { 2437 retval = PTR_ERR(boost_tasks[cpu]); 2438 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2439 n_rcu_torture_boost_ktrerror++; 2440 boost_tasks[cpu] = NULL; 2441 mutex_unlock(&boost_mutex); 2442 return retval; 2443 } 2444 mutex_unlock(&boost_mutex); 2445 return 0; 2446 } 2447 2448 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 2449 { 2450 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 2451 return NOTIFY_OK; 2452 } 2453 2454 static struct notifier_block rcu_torture_stall_block = { 2455 .notifier_call = rcu_torture_stall_nf, 2456 }; 2457 2458 /* 2459 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2460 * induces a CPU stall for the time specified by stall_cpu. If a new 2461 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 2462 */ 2463 static int rcu_torture_stall(void *args) 2464 { 2465 int idx; 2466 int ret; 2467 unsigned long stop_at; 2468 2469 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2470 if (rcu_cpu_stall_notifiers) { 2471 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 2472 if (ret) 2473 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 2474 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 2475 } 2476 if (stall_cpu_holdoff > 0) { 2477 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2478 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2479 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2480 } 2481 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2482 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2483 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2484 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2485 if (kthread_should_stop()) 2486 break; 2487 schedule_timeout_uninterruptible(HZ); 2488 } 2489 } 2490 if (!kthread_should_stop() && stall_cpu > 0) { 2491 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2492 stop_at = ktime_get_seconds() + stall_cpu; 2493 /* RCU CPU stall is expected behavior in following code. 
*/ 2494 idx = cur_ops->readlock(); 2495 if (stall_cpu_irqsoff) 2496 local_irq_disable(); 2497 else if (!stall_cpu_block) 2498 preempt_disable(); 2499 pr_alert("%s start on CPU %d.\n", 2500 __func__, raw_smp_processor_id()); 2501 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && 2502 !kthread_should_stop()) 2503 if (stall_cpu_block) { 2504 #ifdef CONFIG_PREEMPTION 2505 preempt_schedule(); 2506 #else 2507 schedule_timeout_uninterruptible(HZ); 2508 #endif 2509 } else if (stall_no_softlockup) { 2510 touch_softlockup_watchdog(); 2511 } 2512 if (stall_cpu_irqsoff) 2513 local_irq_enable(); 2514 else if (!stall_cpu_block) 2515 preempt_enable(); 2516 cur_ops->readunlock(idx); 2517 } 2518 pr_alert("%s end.\n", __func__); 2519 if (rcu_cpu_stall_notifiers && !ret) { 2520 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 2521 if (ret) 2522 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 2523 } 2524 torture_shutdown_absorb("rcu_torture_stall"); 2525 while (!kthread_should_stop()) 2526 schedule_timeout_interruptible(10 * HZ); 2527 return 0; 2528 } 2529 2530 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2531 static int __init rcu_torture_stall_init(void) 2532 { 2533 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2534 return 0; 2535 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2536 } 2537 2538 /* State structure for forward-progress self-propagating RCU callback. */ 2539 struct fwd_cb_state { 2540 struct rcu_head rh; 2541 int stop; 2542 }; 2543 2544 /* 2545 * Forward-progress self-propagating RCU callback function. Because 2546 * callbacks run from softirq, this function is an implicit RCU read-side 2547 * critical section. 2548 */ 2549 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2550 { 2551 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2552 2553 if (READ_ONCE(fcsp->stop)) { 2554 WRITE_ONCE(fcsp->stop, 2); 2555 return; 2556 } 2557 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2558 } 2559 2560 /* State for continuous-flood RCU callbacks. */ 2561 struct rcu_fwd_cb { 2562 struct rcu_head rh; 2563 struct rcu_fwd_cb *rfc_next; 2564 struct rcu_fwd *rfc_rfp; 2565 int rfc_gps; 2566 }; 2567 2568 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2569 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2570 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2571 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 2572 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2573 2574 struct rcu_launder_hist { 2575 long n_launders; 2576 unsigned long launder_gp_seq; 2577 }; 2578 2579 struct rcu_fwd { 2580 spinlock_t rcu_fwd_lock; 2581 struct rcu_fwd_cb *rcu_fwd_cb_head; 2582 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2583 long n_launders_cb; 2584 unsigned long rcu_fwd_startat; 2585 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2586 unsigned long rcu_launder_gp_seq_start; 2587 int rcu_fwd_id; 2588 }; 2589 2590 static DEFINE_MUTEX(rcu_fwd_mutex); 2591 static struct rcu_fwd *rcu_fwds; 2592 static unsigned long rcu_fwd_seq; 2593 static atomic_long_t rcu_fwd_max_cbs; 2594 static bool rcu_fwd_emergency_stop; 2595 2596 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2597 { 2598 unsigned long gps; 2599 unsigned long gps_old; 2600 int i; 2601 int j; 2602 2603 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2604 if (rfp->n_launders_hist[i].n_launders > 0) 2605 break; 2606 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2607 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2608 gps_old = rfp->rcu_launder_gp_seq_start; 2609 for (j = 0; j <= i; j++) { 2610 gps = rfp->n_launders_hist[j].launder_gp_seq; 2611 pr_cont(" %ds/%d: %ld:%ld", 2612 j + 1, FWD_CBS_HIST_DIV, 2613 rfp->n_launders_hist[j].n_launders, 2614 rcutorture_seq_diff(gps, gps_old)); 2615 gps_old = gps; 2616 } 2617 pr_cont("\n"); 2618 } 2619 2620 /* Callback function for continuous-flood RCU callbacks. */ 2621 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2622 { 2623 unsigned long flags; 2624 int i; 2625 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2626 struct rcu_fwd_cb **rfcpp; 2627 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2628 2629 rfcp->rfc_next = NULL; 2630 rfcp->rfc_gps++; 2631 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2632 rfcpp = rfp->rcu_fwd_cb_tail; 2633 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2634 smp_store_release(rfcpp, rfcp); 2635 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2636 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2637 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2638 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2639 rfp->n_launders_hist[i].n_launders++; 2640 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2641 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2642 } 2643 2644 // Give the scheduler a chance, even on nohz_full CPUs. 2645 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2646 { 2647 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2648 // Real call_rcu() floods hit userspace, so emulate that. 2649 if (need_resched() || (iter & 0xfff)) 2650 schedule(); 2651 return; 2652 } 2653 // No userspace emulation: CB invocation throttles call_rcu() 2654 cond_resched(); 2655 } 2656 2657 /* 2658 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2659 * test is over or because we hit an OOM event. 
2660 */ 2661 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2662 { 2663 unsigned long flags; 2664 unsigned long freed = 0; 2665 struct rcu_fwd_cb *rfcp; 2666 2667 for (;;) { 2668 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2669 rfcp = rfp->rcu_fwd_cb_head; 2670 if (!rfcp) { 2671 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2672 break; 2673 } 2674 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2675 if (!rfp->rcu_fwd_cb_head) 2676 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2677 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2678 kfree(rfcp); 2679 freed++; 2680 rcu_torture_fwd_prog_cond_resched(freed); 2681 if (tick_nohz_full_enabled()) { 2682 local_irq_save(flags); 2683 rcu_momentary_dyntick_idle(); 2684 local_irq_restore(flags); 2685 } 2686 } 2687 return freed; 2688 } 2689 2690 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2691 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2692 int *tested, int *tested_tries) 2693 { 2694 unsigned long cver; 2695 unsigned long dur; 2696 struct fwd_cb_state fcs; 2697 unsigned long gps; 2698 int idx; 2699 int sd; 2700 int sd4; 2701 bool selfpropcb = false; 2702 unsigned long stopat; 2703 static DEFINE_TORTURE_RANDOM(trs); 2704 2705 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2706 if (!cur_ops->sync) 2707 return; // Cannot do need_resched() forward progress testing without ->sync. 2708 if (cur_ops->call && cur_ops->cb_barrier) { 2709 init_rcu_head_on_stack(&fcs.rh); 2710 selfpropcb = true; 2711 } 2712 2713 /* Tight loop containing cond_resched(). */ 2714 atomic_inc(&rcu_fwd_cb_nodelay); 2715 cur_ops->sync(); /* Later readers see above write. */ 2716 if (selfpropcb) { 2717 WRITE_ONCE(fcs.stop, 0); 2718 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2719 } 2720 cver = READ_ONCE(rcu_torture_current_version); 2721 gps = cur_ops->get_gp_seq(); 2722 sd = cur_ops->stall_dur() + 1; 2723 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2724 dur = sd4 + torture_random(&trs) % (sd - sd4); 2725 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2726 stopat = rfp->rcu_fwd_startat + dur; 2727 while (time_before(jiffies, stopat) && 2728 !shutdown_time_arrived() && 2729 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2730 idx = cur_ops->readlock(); 2731 udelay(10); 2732 cur_ops->readunlock(idx); 2733 if (!fwd_progress_need_resched || need_resched()) 2734 cond_resched(); 2735 } 2736 (*tested_tries)++; 2737 if (!time_before(jiffies, stopat) && 2738 !shutdown_time_arrived() && 2739 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2740 (*tested)++; 2741 cver = READ_ONCE(rcu_torture_current_version) - cver; 2742 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2743 WARN_ON(!cver && gps < 2); 2744 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2745 rfp->rcu_fwd_id, dur, cver, gps); 2746 } 2747 if (selfpropcb) { 2748 WRITE_ONCE(fcs.stop, 1); 2749 cur_ops->sync(); /* Wait for running CB to complete. */ 2750 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2751 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2752 } 2753 2754 if (selfpropcb) { 2755 WARN_ON(READ_ONCE(fcs.stop) != 2); 2756 destroy_rcu_head_on_stack(&fcs.rh); 2757 } 2758 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2759 atomic_dec(&rcu_fwd_cb_nodelay); 2760 } 2761 2762 /* Carry out call_rcu() forward-progress testing. 
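 * Callbacks are flooded via cur_ops->call() and re-posted ("laundered") each
 * time they are invoked; the test is considered to have made forward progress
 * if enough callbacks are laundered enough times (see MIN_FWD_CBS_LAUNDERED
 * and MIN_FWD_CB_LAUNDERS) within the MAX_FWD_CB_JIFFIES window.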
*/ 2763 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2764 { 2765 unsigned long cver; 2766 unsigned long flags; 2767 unsigned long gps; 2768 int i; 2769 long n_launders; 2770 long n_launders_cb_snap; 2771 long n_launders_sa; 2772 long n_max_cbs; 2773 long n_max_gps; 2774 struct rcu_fwd_cb *rfcp; 2775 struct rcu_fwd_cb *rfcpn; 2776 unsigned long stopat; 2777 unsigned long stoppedat; 2778 2779 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2780 if (READ_ONCE(rcu_fwd_emergency_stop)) 2781 return; /* Get out of the way quickly, no GP wait! */ 2782 if (!cur_ops->call) 2783 return; /* Can't do call_rcu() fwd prog without ->call. */ 2784 2785 /* Loop continuously posting RCU callbacks. */ 2786 atomic_inc(&rcu_fwd_cb_nodelay); 2787 cur_ops->sync(); /* Later readers see above write. */ 2788 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2789 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2790 n_launders = 0; 2791 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2792 n_launders_sa = 0; 2793 n_max_cbs = 0; 2794 n_max_gps = 0; 2795 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2796 rfp->n_launders_hist[i].n_launders = 0; 2797 cver = READ_ONCE(rcu_torture_current_version); 2798 gps = cur_ops->get_gp_seq(); 2799 rfp->rcu_launder_gp_seq_start = gps; 2800 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2801 while (time_before(jiffies, stopat) && 2802 !shutdown_time_arrived() && 2803 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2804 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2805 rfcpn = NULL; 2806 if (rfcp) 2807 rfcpn = READ_ONCE(rfcp->rfc_next); 2808 if (rfcpn) { 2809 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2810 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2811 break; 2812 rfp->rcu_fwd_cb_head = rfcpn; 2813 n_launders++; 2814 n_launders_sa++; 2815 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2816 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2817 if (WARN_ON_ONCE(!rfcp)) { 2818 schedule_timeout_interruptible(1); 2819 continue; 2820 } 2821 n_max_cbs++; 2822 n_launders_sa = 0; 2823 rfcp->rfc_gps = 0; 2824 rfcp->rfc_rfp = rfp; 2825 } else { 2826 rfcp = NULL; 2827 } 2828 if (rfcp) 2829 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2830 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2831 if (tick_nohz_full_enabled()) { 2832 local_irq_save(flags); 2833 rcu_momentary_dyntick_idle(); 2834 local_irq_restore(flags); 2835 } 2836 } 2837 stoppedat = jiffies; 2838 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2839 cver = READ_ONCE(rcu_torture_current_version) - cver; 2840 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2841 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2842 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. 
*/ 2843 (void)rcu_torture_fwd_prog_cbfree(rfp); 2844 2845 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2846 !shutdown_time_arrived()) { 2847 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) 2848 cur_ops->gp_kthread_dbg(); 2849 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n", 2850 __func__, 2851 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2852 n_launders + n_max_cbs - n_launders_cb_snap, 2853 n_launders, n_launders_sa, 2854 n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); 2855 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2856 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 2857 rcu_torture_fwd_cb_hist(rfp); 2858 mutex_unlock(&rcu_fwd_mutex); 2859 } 2860 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2861 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2862 atomic_dec(&rcu_fwd_cb_nodelay); 2863 } 2864 2865 2866 /* 2867 * OOM notifier, but this only prints diagnostic information for the 2868 * current forward-progress test. 2869 */ 2870 static int rcutorture_oom_notify(struct notifier_block *self, 2871 unsigned long notused, void *nfreed) 2872 { 2873 int i; 2874 long ncbs; 2875 struct rcu_fwd *rfp; 2876 2877 mutex_lock(&rcu_fwd_mutex); 2878 rfp = rcu_fwds; 2879 if (!rfp) { 2880 mutex_unlock(&rcu_fwd_mutex); 2881 return NOTIFY_OK; 2882 } 2883 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2884 __func__); 2885 for (i = 0; i < fwd_progress; i++) { 2886 rcu_torture_fwd_cb_hist(&rfp[i]); 2887 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2888 } 2889 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2890 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2891 ncbs = 0; 2892 for (i = 0; i < fwd_progress; i++) 2893 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2894 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2895 cur_ops->cb_barrier(); 2896 ncbs = 0; 2897 for (i = 0; i < fwd_progress; i++) 2898 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2899 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2900 cur_ops->cb_barrier(); 2901 ncbs = 0; 2902 for (i = 0; i < fwd_progress; i++) 2903 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2904 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2905 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2906 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2907 pr_info("%s returning after OOM processing.\n", __func__); 2908 mutex_unlock(&rcu_fwd_mutex); 2909 return NOTIFY_OK; 2910 } 2911 2912 static struct notifier_block rcutorture_oom_nb = { 2913 .notifier_call = rcutorture_oom_notify 2914 }; 2915 2916 /* Carry out grace-period forward-progress testing. 
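 * Kthread 0 paces the other rcu_torture_fwd_prog kthreads through the
 * rcu_fwd_seq counter so that all of them run their tests during the same
 * interval.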
*/ 2917 static int rcu_torture_fwd_prog(void *args) 2918 { 2919 bool firsttime = true; 2920 long max_cbs; 2921 int oldnice = task_nice(current); 2922 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2923 struct rcu_fwd *rfp = args; 2924 int tested = 0; 2925 int tested_tries = 0; 2926 2927 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2928 rcu_bind_current_to_nocb(); 2929 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2930 set_user_nice(current, MAX_NICE); 2931 do { 2932 if (!rfp->rcu_fwd_id) { 2933 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2934 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2935 if (!firsttime) { 2936 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2937 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2938 } 2939 firsttime = false; 2940 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2941 } else { 2942 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2943 schedule_timeout_interruptible(HZ / 20); 2944 oldseq = READ_ONCE(rcu_fwd_seq); 2945 } 2946 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2947 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2948 rcu_torture_fwd_prog_cr(rfp); 2949 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2950 (!IS_ENABLED(CONFIG_TINY_RCU) || 2951 (rcu_inkernel_boot_has_ended() && 2952 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2953 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2954 2955 /* Avoid slow periods, better to test when busy. */ 2956 if (stutter_wait("rcu_torture_fwd_prog")) 2957 sched_set_normal(current, oldnice); 2958 } while (!torture_must_stop()); 2959 /* Short runs might not contain a valid forward-progress attempt. */ 2960 if (!rfp->rcu_fwd_id) { 2961 WARN_ON(!tested && tested_tries >= 5); 2962 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2963 } 2964 torture_kthread_stopping("rcu_torture_fwd_prog"); 2965 return 0; 2966 } 2967 2968 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2969 static int __init rcu_torture_fwd_prog_init(void) 2970 { 2971 int i; 2972 int ret = 0; 2973 struct rcu_fwd *rfp; 2974 2975 if (!fwd_progress) 2976 return 0; /* Not requested, so don't do it. */ 2977 if (fwd_progress >= nr_cpu_ids) { 2978 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 2979 fwd_progress = nr_cpu_ids; 2980 } else if (fwd_progress < 0) { 2981 fwd_progress = nr_cpu_ids; 2982 } 2983 if ((!cur_ops->sync && !cur_ops->call) || 2984 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2985 cur_ops == &rcu_busted_ops) { 2986 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2987 fwd_progress = 0; 2988 return 0; 2989 } 2990 if (stall_cpu > 0) { 2991 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2992 fwd_progress = 0; 2993 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2994 return -EINVAL; /* In module, can fail back to user. */ 2995 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 2996 return 0; 2997 } 2998 if (fwd_progress_holdoff <= 0) 2999 fwd_progress_holdoff = 1; 3000 if (fwd_progress_div <= 0) 3001 fwd_progress_div = 4; 3002 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 3003 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 3004 if (!rfp || !fwd_prog_tasks) { 3005 kfree(rfp); 3006 kfree(fwd_prog_tasks); 3007 fwd_prog_tasks = NULL; 3008 fwd_progress = 0; 3009 return -ENOMEM; 3010 } 3011 for (i = 0; i < fwd_progress; i++) { 3012 spin_lock_init(&rfp[i].rcu_fwd_lock); 3013 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3014 rfp[i].rcu_fwd_id = i; 3015 } 3016 mutex_lock(&rcu_fwd_mutex); 3017 rcu_fwds = rfp; 3018 mutex_unlock(&rcu_fwd_mutex); 3019 register_oom_notifier(&rcutorture_oom_nb); 3020 for (i = 0; i < fwd_progress; i++) { 3021 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3022 if (ret) { 3023 fwd_progress = i; 3024 return ret; 3025 } 3026 } 3027 return 0; 3028 } 3029 3030 static void rcu_torture_fwd_prog_cleanup(void) 3031 { 3032 int i; 3033 struct rcu_fwd *rfp; 3034 3035 if (!rcu_fwds || !fwd_prog_tasks) 3036 return; 3037 for (i = 0; i < fwd_progress; i++) 3038 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3039 unregister_oom_notifier(&rcutorture_oom_nb); 3040 mutex_lock(&rcu_fwd_mutex); 3041 rfp = rcu_fwds; 3042 rcu_fwds = NULL; 3043 mutex_unlock(&rcu_fwd_mutex); 3044 kfree(rfp); 3045 kfree(fwd_prog_tasks); 3046 fwd_prog_tasks = NULL; 3047 } 3048 3049 /* Callback function for RCU barrier testing. */ 3050 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3051 { 3052 atomic_inc(&barrier_cbs_invoked); 3053 } 3054 3055 /* IPI handler to get callback posted on desired CPU, if online. */ 3056 static int rcu_torture_barrier1cb(void *rcu_void) 3057 { 3058 struct rcu_head *rhp = rcu_void; 3059 3060 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3061 return 0; 3062 } 3063 3064 /* kthread function to register callbacks used to test RCU barriers. */ 3065 static int rcu_torture_barrier_cbs(void *arg) 3066 { 3067 long myid = (long)arg; 3068 bool lastphase = false; 3069 bool newphase; 3070 struct rcu_head rcu; 3071 3072 init_rcu_head_on_stack(&rcu); 3073 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3074 set_user_nice(current, MAX_NICE); 3075 do { 3076 wait_event(barrier_cbs_wq[myid], 3077 (newphase = 3078 smp_load_acquire(&barrier_phase)) != lastphase || 3079 torture_must_stop()); 3080 lastphase = newphase; 3081 if (torture_must_stop()) 3082 break; 3083 /* 3084 * The above smp_load_acquire() ensures barrier_phase load 3085 * is ordered before the following ->call(). 3086 */ 3087 if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1)) 3088 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3089 3090 if (atomic_dec_and_test(&barrier_cbs_count)) 3091 wake_up(&barrier_wq); 3092 } while (!torture_must_stop()); 3093 if (cur_ops->cb_barrier != NULL) 3094 cur_ops->cb_barrier(); 3095 destroy_rcu_head_on_stack(&rcu); 3096 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3097 return 0; 3098 } 3099 3100 /* kthread function to drive and coordinate RCU barrier testing. */ 3101 static int rcu_torture_barrier(void *arg) 3102 { 3103 int i; 3104 3105 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3106 do { 3107 atomic_set(&barrier_cbs_invoked, 0); 3108 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3109 /* Ensure barrier_phase ordered after prior assignments. 
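		 * (The smp_store_release() below pairs with the
		 * smp_load_acquire() of barrier_phase in
		 * rcu_torture_barrier_cbs().)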
*/ 3110 smp_store_release(&barrier_phase, !barrier_phase); 3111 for (i = 0; i < n_barrier_cbs; i++) 3112 wake_up(&barrier_cbs_wq[i]); 3113 wait_event(barrier_wq, 3114 atomic_read(&barrier_cbs_count) == 0 || 3115 torture_must_stop()); 3116 if (torture_must_stop()) 3117 break; 3118 n_barrier_attempts++; 3119 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3120 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3121 n_rcu_torture_barrier_error++; 3122 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3123 atomic_read(&barrier_cbs_invoked), 3124 n_barrier_cbs); 3125 WARN_ON(1); 3126 // Wait manually for the remaining callbacks 3127 i = 0; 3128 do { 3129 if (WARN_ON(i++ > HZ)) 3130 i = INT_MIN; 3131 schedule_timeout_interruptible(1); 3132 cur_ops->cb_barrier(); 3133 } while (atomic_read(&barrier_cbs_invoked) != 3134 n_barrier_cbs && 3135 !torture_must_stop()); 3136 smp_mb(); // Can't trust ordering if broken. 3137 if (!torture_must_stop()) 3138 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3139 atomic_read(&barrier_cbs_invoked)); 3140 } else { 3141 n_barrier_successes++; 3142 } 3143 schedule_timeout_interruptible(HZ / 10); 3144 } while (!torture_must_stop()); 3145 torture_kthread_stopping("rcu_torture_barrier"); 3146 return 0; 3147 } 3148 3149 /* Initialize RCU barrier testing. */ 3150 static int rcu_torture_barrier_init(void) 3151 { 3152 int i; 3153 int ret; 3154 3155 if (n_barrier_cbs <= 0) 3156 return 0; 3157 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3158 pr_alert("%s" TORTURE_FLAG 3159 " Call or barrier ops missing for %s,\n", 3160 torture_type, cur_ops->name); 3161 pr_alert("%s" TORTURE_FLAG 3162 " RCU barrier testing omitted from run.\n", 3163 torture_type); 3164 return 0; 3165 } 3166 atomic_set(&barrier_cbs_count, 0); 3167 atomic_set(&barrier_cbs_invoked, 0); 3168 barrier_cbs_tasks = 3169 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3170 GFP_KERNEL); 3171 barrier_cbs_wq = 3172 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3173 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3174 return -ENOMEM; 3175 for (i = 0; i < n_barrier_cbs; i++) { 3176 init_waitqueue_head(&barrier_cbs_wq[i]); 3177 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3178 (void *)(long)i, 3179 barrier_cbs_tasks[i]); 3180 if (ret) 3181 return ret; 3182 } 3183 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3184 } 3185 3186 /* Clean up after RCU barrier testing. */ 3187 static void rcu_torture_barrier_cleanup(void) 3188 { 3189 int i; 3190 3191 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3192 if (barrier_cbs_tasks != NULL) { 3193 for (i = 0; i < n_barrier_cbs; i++) 3194 torture_stop_kthread(rcu_torture_barrier_cbs, 3195 barrier_cbs_tasks[i]); 3196 kfree(barrier_cbs_tasks); 3197 barrier_cbs_tasks = NULL; 3198 } 3199 if (barrier_cbs_wq != NULL) { 3200 kfree(barrier_cbs_wq); 3201 barrier_cbs_wq = NULL; 3202 } 3203 } 3204 3205 static bool rcu_torture_can_boost(void) 3206 { 3207 static int boost_warn_once; 3208 int prio; 3209 3210 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3211 return false; 3212 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3213 return false; 3214 3215 prio = rcu_get_gp_kthreads_prio(); 3216 if (!prio) 3217 return false; 3218 3219 if (prio < 2) { 3220 if (boost_warn_once == 1) 3221 return false; 3222 3223 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3224 boost_warn_once = 1; 3225 return false; 3226 } 3227 3228 return true; 3229 } 3230 3231 static bool read_exit_child_stop; 3232 static bool read_exit_child_stopped; 3233 static wait_queue_head_t read_exit_wq; 3234 3235 // Child kthread which just does an rcutorture reader and exits. 3236 static int rcu_torture_read_exit_child(void *trsp_in) 3237 { 3238 struct torture_random_state *trsp = trsp_in; 3239 3240 set_user_nice(current, MAX_NICE); 3241 // Minimize time between reading and exiting. 3242 while (!kthread_should_stop()) 3243 schedule_timeout_uninterruptible(HZ / 20); 3244 (void)rcu_torture_one_read(trsp, -1); 3245 return 0; 3246 } 3247 3248 // Parent kthread which creates and destroys read-exit child kthreads. 3249 static int rcu_torture_read_exit(void *unused) 3250 { 3251 bool errexit = false; 3252 int i; 3253 struct task_struct *tsp; 3254 DEFINE_TORTURE_RANDOM(trs); 3255 3256 // Allocate and initialize. 3257 set_user_nice(current, MAX_NICE); 3258 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3259 3260 // Each pass through this loop does one read-exit episode. 3261 do { 3262 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3263 for (i = 0; i < read_exit_burst; i++) { 3264 if (READ_ONCE(read_exit_child_stop)) 3265 break; 3266 stutter_wait("rcu_torture_read_exit"); 3267 // Spawn child. 3268 tsp = kthread_run(rcu_torture_read_exit_child, 3269 &trs, "%s", "rcu_torture_read_exit_child"); 3270 if (IS_ERR(tsp)) { 3271 TOROUT_ERRSTRING("out of memory"); 3272 errexit = true; 3273 break; 3274 } 3275 cond_resched(); 3276 kthread_stop(tsp); 3277 n_read_exits++; 3278 } 3279 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3280 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3281 i = 0; 3282 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3283 schedule_timeout_uninterruptible(HZ); 3284 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3285 3286 // Clean up and exit. 3287 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3288 smp_mb(); // Store before wakeup. 3289 wake_up(&read_exit_wq); 3290 while (!torture_must_stop()) 3291 schedule_timeout_uninterruptible(HZ / 20); 3292 torture_kthread_stopping("rcu_torture_read_exit"); 3293 return 0; 3294 } 3295 3296 static int rcu_torture_read_exit_init(void) 3297 { 3298 if (read_exit_burst <= 0) 3299 return 0; 3300 init_waitqueue_head(&read_exit_wq); 3301 read_exit_child_stop = false; 3302 read_exit_child_stopped = false; 3303 return torture_create_kthread(rcu_torture_read_exit, NULL, 3304 read_exit_task); 3305 } 3306 3307 static void rcu_torture_read_exit_cleanup(void) 3308 { 3309 if (!read_exit_task) 3310 return; 3311 WRITE_ONCE(read_exit_child_stop, true); 3312 smp_mb(); // Above write before wait. 
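	// The smp_load_acquire() in the wait_event() below pairs with the
	// smp_store_release() of read_exit_child_stopped in rcu_torture_read_exit().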
static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}

static void rcutorture_test_nmis(int n)
{
#if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	int cpu;
	int dumpcpu;
	int i;

	for (i = 0; i < n; i++) {
		preempt_disable();
		cpu = smp_processor_id();
		dumpcpu = cpu + 1;
		if (dumpcpu >= nr_cpu_ids)
			dumpcpu = 0;
		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
		dump_cpu_task(dumpcpu);
		preempt_enable();
		schedule_timeout_uninterruptible(15 * HZ);
	}
#else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		if (cur_ops->gp_slow_unregister)
			cur_ops->gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	rcutorture_test_nmis(test_nmis);

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (cur_ops->get_gp_data)
		cur_ops->get_gp_data(&flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	if (cur_ops->gp_slow_unregister)
		cur_ops->gp_slow_unregister(NULL);
}
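// The two callbacks below support the debug-objects test in
// rcu_test_debug_objects().  rcu_torture_leak_cb() intentionally does
// nothing, while rcu_torture_err_cb() fires only if a duplicate
// call_rcu()-style invocation on the same rcu_head slips past
// debug-objects, which models a bug of the general form:
//
//	call_rcu(&obj->rh, my_cb);
//	call_rcu(&obj->rh, my_cb);	// Duplicate; corrupts the callback list.
//
// (obj and my_cb are illustrative names, not identifiers from this file.)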
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
	struct rcu_head rh1;
	struct rcu_head rh2;
	int idx;

	if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) {
		pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n",
			 KBUILD_MODNAME, cur_ops->name);
		return;
	}

	if (WARN_ON_ONCE(cur_ops->debug_objects &&
			 (!cur_ops->call || !cur_ops->cb_barrier)))
		return;

	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */
	cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	cur_ops->call(&rh2, rcu_torture_leak_cb);
	cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		cur_ops->call(rhp, rcu_torture_leak_cb);
		cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	cur_ops->readunlock(idx);

	/* Wait for them all to get done so we can safely return. */
	cur_ops->cb_barrier();
	pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
}
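// rcutorture_sync() below is handed to torture_onoff_init() later in
// rcu_torture_init() as a hook invoked around CPU-hotplug operations.
// The "!(++n & 0xfff)" check throttles it so that only roughly one call
// in 4096 actually invokes cur_ops->sync(), keeping hotplug testing from
// being dominated by grace-period waits.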
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}

static DEFINE_MUTEX(mut0);
static DEFINE_MUTEX(mut1);
static DEFINE_MUTEX(mut2);
static DEFINE_MUTEX(mut3);
static DEFINE_MUTEX(mut4);
static DEFINE_MUTEX(mut5);
static DEFINE_MUTEX(mut6);
static DEFINE_MUTEX(mut7);
static DEFINE_MUTEX(mut8);
static DEFINE_MUTEX(mut9);

static DECLARE_RWSEM(rwsem0);
static DECLARE_RWSEM(rwsem1);
static DECLARE_RWSEM(rwsem2);
static DECLARE_RWSEM(rwsem3);
static DECLARE_RWSEM(rwsem4);
static DECLARE_RWSEM(rwsem5);
static DECLARE_RWSEM(rwsem6);
static DECLARE_RWSEM(rwsem7);
static DECLARE_RWSEM(rwsem8);
static DECLARE_RWSEM(rwsem9);

DEFINE_STATIC_SRCU(srcu0);
DEFINE_STATIC_SRCU(srcu1);
DEFINE_STATIC_SRCU(srcu2);
DEFINE_STATIC_SRCU(srcu3);
DEFINE_STATIC_SRCU(srcu4);
DEFINE_STATIC_SRCU(srcu5);
DEFINE_STATIC_SRCU(srcu6);
DEFINE_STATIC_SRCU(srcu7);
DEFINE_STATIC_SRCU(srcu8);
DEFINE_STATIC_SRCU(srcu9);

static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
			     int cyclelen, int deadlock)
{
	int j = i + 1;

	if (j >= cyclelen)
		j = deadlock ? 0 : -1;
	if (j >= 0)
		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
	else
		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
	return j;
}
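// Decoding sketch for test_srcu_lockdep ("DNNL"): the function below
// computes D = test_srcu_lockdep / 1000 (nonzero requests an actual
// deadlock cycle), NN = (test_srcu_lockdep / 10) % 100 (which lock types
// participate), and L = test_srcu_lockdep % 10 (cycle length).  For
// example, rcutorture.test_srcu_lockdep=1003 would request a deliberate
// three-way SRCU-only deadlock (D=1, NN=00, L=3).  Each pass acquires
// element i and, while holding it, waits for a grace period on the
// element chosen by srcu_lockdep_next(), which closes the cycle back to
// element 0 only when D is nonzero.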
"" : "non-"); 3620 if (deadlock && cyclelen == 1) 3621 pr_info("%s: Expect hang.\n", __func__); 3622 for (i = 0; i < cyclelen; i++) { 3623 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 3624 "srcu_read_unlock", i, cyclelen, deadlock); 3625 idx = srcu_read_lock(srcus[i]); 3626 if (j >= 0) 3627 synchronize_srcu(srcus[j]); 3628 srcu_read_unlock(srcus[i], idx); 3629 } 3630 return; 3631 } 3632 3633 if (testtype == 1) { 3634 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 3635 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3636 for (i = 0; i < cyclelen; i++) { 3637 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 3638 __func__, i, i, i, i); 3639 idx = srcu_read_lock(srcus[i]); 3640 mutex_lock(muts[i]); 3641 mutex_unlock(muts[i]); 3642 srcu_read_unlock(srcus[i], idx); 3643 3644 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 3645 "mutex_unlock", i, cyclelen, deadlock); 3646 mutex_lock(muts[i]); 3647 if (j >= 0) 3648 synchronize_srcu(srcus[j]); 3649 mutex_unlock(muts[i]); 3650 } 3651 return; 3652 } 3653 3654 if (testtype == 2) { 3655 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 3656 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3657 for (i = 0; i < cyclelen; i++) { 3658 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 3659 __func__, i, i, i, i); 3660 idx = srcu_read_lock(srcus[i]); 3661 down_read(rwsems[i]); 3662 up_read(rwsems[i]); 3663 srcu_read_unlock(srcus[i], idx); 3664 3665 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 3666 "up_write", i, cyclelen, deadlock); 3667 down_write(rwsems[i]); 3668 if (j >= 0) 3669 synchronize_srcu(srcus[j]); 3670 up_write(rwsems[i]); 3671 } 3672 return; 3673 } 3674 3675 #ifdef CONFIG_TASKS_TRACE_RCU 3676 if (testtype == 3) { 3677 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 3678 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3679 if (deadlock && cyclelen == 1) 3680 pr_info("%s: Expect hang.\n", __func__); 3681 for (i = 0; i < cyclelen; i++) { 3682 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 3683 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 3684 : "synchronize_srcu"; 3685 char *fu = i == 0 ? 
"rcu_read_unlock_trace" : "srcu_read_unlock"; 3686 3687 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 3688 if (i == 0) 3689 rcu_read_lock_trace(); 3690 else 3691 idx = srcu_read_lock(srcus[i]); 3692 if (j >= 0) { 3693 if (i == cyclelen - 1) 3694 synchronize_rcu_tasks_trace(); 3695 else 3696 synchronize_srcu(srcus[j]); 3697 } 3698 if (i == 0) 3699 rcu_read_unlock_trace(); 3700 else 3701 srcu_read_unlock(srcus[i], idx); 3702 } 3703 return; 3704 } 3705 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 3706 3707 err_out: 3708 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 3709 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 3710 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 3711 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 3712 pr_info("%s: L: Cycle length.\n", __func__); 3713 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 3714 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 3715 } 3716 3717 static int __init 3718 rcu_torture_init(void) 3719 { 3720 long i; 3721 int cpu; 3722 int firsterr = 0; 3723 int flags = 0; 3724 unsigned long gp_seq = 0; 3725 static struct rcu_torture_ops *torture_ops[] = { 3726 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 3727 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 3728 &trivial_ops, 3729 }; 3730 3731 if (!torture_init_begin(torture_type, verbose)) 3732 return -EBUSY; 3733 3734 /* Process args and tell the world that the torturer is on the job. */ 3735 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 3736 cur_ops = torture_ops[i]; 3737 if (strcmp(torture_type, cur_ops->name) == 0) 3738 break; 3739 } 3740 if (i == ARRAY_SIZE(torture_ops)) { 3741 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 3742 torture_type); 3743 pr_alert("rcu-torture types:"); 3744 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 3745 pr_cont(" %s", torture_ops[i]->name); 3746 pr_cont("\n"); 3747 firsterr = -EINVAL; 3748 cur_ops = NULL; 3749 goto unwind; 3750 } 3751 if (cur_ops->fqs == NULL && fqs_duration != 0) { 3752 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 3753 fqs_duration = 0; 3754 } 3755 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 3756 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3757 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 3758 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 3759 nocbs_nthreads = 0; 3760 } 3761 if (cur_ops->init) 3762 cur_ops->init(); 3763 3764 rcu_torture_init_srcu_lockdep(); 3765 3766 if (nreaders >= 0) { 3767 nrealreaders = nreaders; 3768 } else { 3769 nrealreaders = num_online_cpus() - 2 - nreaders; 3770 if (nrealreaders <= 0) 3771 nrealreaders = 1; 3772 } 3773 rcu_torture_print_module_parms(cur_ops, "Start of test"); 3774 if (cur_ops->get_gp_data) 3775 cur_ops->get_gp_data(&flags, &gp_seq); 3776 start_gp_seq = gp_seq; 3777 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 3778 cur_ops->name, (long)gp_seq, flags); 3779 3780 /* Set up the freelist. */ 3781 3782 INIT_LIST_HEAD(&rcu_torture_freelist); 3783 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 3784 rcu_tortures[i].rtort_mbtest = 0; 3785 list_add_tail(&rcu_tortures[i].rtort_free, 3786 &rcu_torture_freelist); 3787 } 3788 3789 /* Initialize the statistics so that each run gets its own numbers. 
	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
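	// The force-quiescent-state (fqs) burst kthread below is created only
	// when both fqs_duration and fqs_holdoff are positive; fqs_duration
	// was already forced to zero earlier in this function when the
	// selected torture type provides no ->fqs method.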
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_holdoff < 0)
		fqs_holdoff = 0;
	if (fqs_duration && fqs_holdoff) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
		cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);