// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	(1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	(1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
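
/*
 * The RCUTORTURE_RDR_* bits above are OR-ed together to describe how a
 * given reader segment is protected, and the low-order bits double as
 * the "extendables" module parameter.  For example, extendables=0x05
 * would limit reader extension to disabling bh and preemption.  The
 * bits a given flavor will actually use are normally further limited
 * by that flavor's ->extendables field.
 */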

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
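
/*
 * For example (parameter values chosen arbitrarily), the settings above
 * can be supplied at module-load time:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stutter=0
 *
 * or on the kernel command line as rcutorture.torture_type=srcu and so
 * on.  Documentation/RCU/torture.rst describes each parameter in detail.
 */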

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10
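
/*
 * Once replaced as the structure referenced by rcu_torture_current, each
 * rcu_torture element is aged through a RCU_TORTURE_PIPE_LEN-stage
 * pipeline, advancing one stage per grace period (see
 * rcu_torture_pipe_update_one() below) and returning to the freelist
 * only after traversing the whole pipeline.  A reader still holding a
 * reference to an element deep in the pipeline has thus outlived several
 * grace periods, which the reader-side checks treat as an RCU bug.
 */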

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_gp_completed)(void);
	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
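
/*
 * Each RCU flavor supported by rcutorture fills in one of these ops
 * vectors below, and cur_ops points at whichever one was selected by
 * the torture_type module parameter.  A NULL member simply means that
 * the flavor lacks the corresponding capability, in which case the
 * matching tests are skipped (see for example the NULL checks in
 * rcu_torture_write_types() below).
 */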

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
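
/*
 * For a sense of scale: with the constants above and, say, four readers,
 * a given call to rcu_read_delay() takes the 300-millisecond mdelay()
 * path roughly once per 2.4 million invocations, takes the 200-microsecond
 * udelay() path about once per 1600 invocations, and, when called with
 * preemption enabled, attempts a preemption about once per 2000
 * invocations, so the overwhelming majority of calls return without
 * delaying at all.
 */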

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.get_gp_completed = get_completed_synchronize_rcu,
	.get_gp_completed_full = get_completed_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.get_gp_data = rcutorture_get_gp_data,
	.gp_slow_register = rcu_gp_slow_register,
	.gp_slow_unregister = rcu_gp_slow_unregister,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.debug_objects = 1,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

static int srcu_torture_read_lock(void)
{
	if (cur_ops == &srcud_ops)
		return srcu_read_lock_nmisafe(srcu_ctlp);
	else
		return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	if (cur_ops == &srcud_ops)
		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
	else
		srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.name = "srcu"
};
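
/*
 * The "srcu" flavor above drives the statically allocated srcu_ctl.
 * The "srcud" flavor below is identical except that srcu_torture_init()
 * repoints srcu_ctlp at the dynamically initialized srcu_ctld, thereby
 * also exercising init_srcu_struct() and cleanup_srcu_struct().
 */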

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
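
/*
 * The idea behind the loop above: trivial readers do nothing but disable
 * preemption, so the scheduler cannot switch away from a CPU that is
 * still executing a reader.  Forcing this task to run on each online CPU
 * in turn therefore guarantees that every CPU has context-switched, and
 * thus that all pre-existing readers have completed, at least in the
 * absence of CPU hotplug (hence the caveat above).
 */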

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.irq_capable = 1,
	.name = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.get_gp_data = rcu_tasks_get_gp_data,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU
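
/*
 * The TASKS_OPS macro above (and the similar *_OPS macros below) expands
 * to "&tasks_ops," when the flavor is configured and to nothing at all
 * otherwise, so the list of available ops vectors can be assembled
 * elsewhere in this file without any #ifdefs at the point of use.
 */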


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.get_gp_data = rcu_tasks_rude_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.get_gp_data = rcu_tasks_trace_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible if rcutorture is built-in;
	 * otherwise the user should manually do this by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
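
/*
 * The writer and fakewriter kthreads pick uniformly at random from the
 * synctype[] entries filled in above on each update.  Booting with
 * exactly one of the gp_* module parameters set therefore restricts the
 * test to that one style of grace-period primitive, while the default
 * (all of them clear) exercises every primitive the selected flavor
 * provides.
 */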

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	unsigned long stallsdone = jiffies;
	bool stutter_waited;
	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];

	// If a new stall test is added, this must be adjusted.
	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ;
	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);

			// Make sure readers block polled grace periods.
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				if (cur_ops->get_gp_completed) {
					cookie = cur_ops->get_gp_completed();
					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
				}
				cur_ops->readunlock(idx);
			}
			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
				idx = cur_ops->readlock();
				cur_ops->get_gp_state_full(&cookie_full);
				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cpumask_pr_args(cpu_online_mask));
				if (cur_ops->get_gp_completed_full) {
					cur_ops->get_gp_completed_full(&cookie_full);
					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
				}
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				do_rtws_sync(&rand, cur_ops->exp_sync);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP:
				rcu_torture_writer_state = RTWS_COND_GET_EXP;
				gp_snap = cur_ops->get_gp_state_exp();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
				cur_ops->cond_sync_exp(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
				cur_ops->cond_sync_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
				cur_ops->cond_sync_exp_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				for (i = 0; i < ARRAY_SIZE(ulo); i++)
					ulo[i] = cur_ops->get_comp_state();
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap)) {
					gp_snap1 = cur_ops->get_gp_state();
					for (i = 0; i < ARRAY_SIZE(ulo); i++)
						if (cur_ops->poll_gp_state(ulo[i]) ||
						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
							ulo[i] = gp_snap1;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
				for (i = 0; i < ARRAY_SIZE(rgo); i++)
					cur_ops->get_comp_state_full(&rgo[i]);
				cur_ops->start_gp_poll_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
					cur_ops->get_gp_state_full(&gp_snap1_full);
					for (i = 0; i < ARRAY_SIZE(rgo); i++)
						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
						    cur_ops->same_gp_state_full(&rgo[i],
										&gp_snap1_full)) {
							rgo[i] = gp_snap1_full;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
				gp_snap = cur_ops->start_gp_poll_exp();
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
				while (!cur_ops->poll_gp_state_exp(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				do_rtws_sync(&rand, cur_ops->sync);
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !atomic_read(&rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended &&
		    time_after(jiffies, stallsdone))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
					tracing_off();
					if (cur_ops->gp_kthread_dbg)
						cur_ops->gp_kthread_dbg();
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
					rcu_ftrace_dump(DUMP_ALL);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	struct rcu_gp_oldstate gp_snap_full;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
1637 */ 1638 torture_kthread_stopping("rcu_torture_fakewriter"); 1639 return 0; 1640 } 1641 1642 do { 1643 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1644 if (cur_ops->cb_barrier != NULL && 1645 torture_random(&rand) % (nfakewriters * 8) == 0) { 1646 cur_ops->cb_barrier(); 1647 } else { 1648 switch (synctype[torture_random(&rand) % nsynctypes]) { 1649 case RTWS_DEF_FREE: 1650 break; 1651 case RTWS_EXP_SYNC: 1652 cur_ops->exp_sync(); 1653 break; 1654 case RTWS_COND_GET: 1655 gp_snap = cur_ops->get_gp_state(); 1656 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1657 cur_ops->cond_sync(gp_snap); 1658 break; 1659 case RTWS_COND_GET_EXP: 1660 gp_snap = cur_ops->get_gp_state_exp(); 1661 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1662 cur_ops->cond_sync_exp(gp_snap); 1663 break; 1664 case RTWS_COND_GET_FULL: 1665 cur_ops->get_gp_state_full(&gp_snap_full); 1666 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1667 cur_ops->cond_sync_full(&gp_snap_full); 1668 break; 1669 case RTWS_COND_GET_EXP_FULL: 1670 cur_ops->get_gp_state_full(&gp_snap_full); 1671 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1672 cur_ops->cond_sync_exp_full(&gp_snap_full); 1673 break; 1674 case RTWS_POLL_GET: 1675 gp_snap = cur_ops->start_gp_poll(); 1676 while (!cur_ops->poll_gp_state(gp_snap)) { 1677 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1678 &rand); 1679 } 1680 break; 1681 case RTWS_POLL_GET_FULL: 1682 cur_ops->start_gp_poll_full(&gp_snap_full); 1683 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1684 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1685 &rand); 1686 } 1687 break; 1688 case RTWS_POLL_GET_EXP: 1689 gp_snap = cur_ops->start_gp_poll_exp(); 1690 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1691 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1692 &rand); 1693 } 1694 break; 1695 case RTWS_POLL_GET_EXP_FULL: 1696 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1697 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1698 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1699 &rand); 1700 } 1701 break; 1702 case RTWS_SYNC: 1703 cur_ops->sync(); 1704 break; 1705 default: 1706 WARN_ON_ONCE(1); 1707 break; 1708 } 1709 } 1710 stutter_wait("rcu_torture_fakewriter"); 1711 } while (!torture_must_stop()); 1712 1713 torture_kthread_stopping("rcu_torture_fakewriter"); 1714 return 0; 1715 } 1716 1717 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1718 { 1719 kfree(rhp); 1720 } 1721 1722 // Set up and carry out testing of RCU's global memory ordering 1723 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1724 struct torture_random_state *trsp) 1725 { 1726 unsigned long loops; 1727 int noc = torture_num_online_cpus(); 1728 int rdrchked; 1729 int rdrchker; 1730 struct rcu_torture_reader_check *rtrcp; // Me. 1731 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1732 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1733 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1734 1735 if (myid < 0) 1736 return; // Don't try this from timer handlers. 1737 1738 // Increment my counter. 1739 rtrcp = &rcu_torture_reader_mbchk[myid]; 1740 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1741 1742 // Attempt to assign someone else some checking work. 
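	// (Added descriptive comment.)  Pick one reader to be checked (rdrchked)
	// and another to do the checking (rdrchker).  Hand off the work only if
	// both picks are within the online-CPU count, this reader's own record
	// is idle, the rcu_torture element has no check attached yet, and the
	// chosen checker has not already been assigned work.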
1743 rdrchked = torture_random(trsp) % nrealreaders; 1744 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1745 rdrchker = torture_random(trsp) % nrealreaders; 1746 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1747 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1748 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1749 !READ_ONCE(rtp->rtort_chkp) && 1750 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1751 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1752 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1753 rtrcp->rtc_chkrdr = rdrchked; 1754 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1755 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1756 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1757 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1758 } 1759 1760 // If assigned some completed work, do it! 1761 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1762 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1763 return; // No work or work not yet ready. 1764 rdrchked = rtrcp_assigner->rtc_chkrdr; 1765 if (WARN_ON_ONCE(rdrchked < 0)) 1766 return; 1767 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1768 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1769 atomic_inc(&n_rcu_torture_mbchk_tries); 1770 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1771 atomic_inc(&n_rcu_torture_mbchk_fail); 1772 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1773 rtrcp_assigner->rtc_ready = 0; 1774 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1775 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1776 } 1777 1778 /* 1779 * Do one extension of an RCU read-side critical section using the 1780 * current reader state in readstate (set to zero for initial entry 1781 * to extended critical section), set the new state as specified by 1782 * newstate (set to zero for final exit from extended critical section), 1783 * and random-number-generator state in trsp. If this is neither the 1784 * beginning or end of the critical section and if there was actually a 1785 * change, do a ->read_delay(). 1786 */ 1787 static void rcutorture_one_extend(int *readstate, int newstate, 1788 struct torture_random_state *trsp, 1789 struct rt_read_seg *rtrsp) 1790 { 1791 unsigned long flags; 1792 int idxnew1 = -1; 1793 int idxnew2 = -1; 1794 int idxold1 = *readstate; 1795 int idxold2 = idxold1; 1796 int statesnew = ~*readstate & newstate; 1797 int statesold = *readstate & ~newstate; 1798 1799 WARN_ON_ONCE(idxold2 < 0); 1800 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1801 rtrsp->rt_readstate = newstate; 1802 1803 /* First, put new protection in place to avoid critical-section gap. 
 */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU_1)
		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
	if (statesnew & RCUTORTURE_RDR_RCU_2)
		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;

	/*
	 * Next, remove old protection, in decreasing order of strength
	 * to avoid unlock paths that aren't safe in the stronger
	 * context. Namely: BH can not be enabled with disabled interrupts.
	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
	 * context.
	 */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_RCU_2) {
		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
		WARN_ON_ONCE(idxnew2 != -1);
		idxold2 = 0;
	}
	if (statesold & RCUTORTURE_RDR_RCU_1) {
		bool lockit;

		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
		WARN_ON_ONCE(idxnew1 != -1);
		idxold1 = 0;
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew1 == -1)
		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
	WARN_ON_ONCE(idxnew1 < 0);
	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
	if (idxnew2 == -1)
		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
	WARN_ON_ONCE(idxnew2 < 0);
	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	*readstate = idxnew1 | idxnew2 | newstate;
	WARN_ON_ONCE(*readstate < 0);
	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set.
*/ 1886 static int 1887 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1888 { 1889 int mask = rcutorture_extend_mask_max(); 1890 unsigned long randmask1 = torture_random(trsp); 1891 unsigned long randmask2 = randmask1 >> 3; 1892 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 1893 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 1894 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1895 1896 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); 1897 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1898 if (!(randmask1 & 0x7)) 1899 mask = mask & randmask2; 1900 else 1901 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1902 1903 // Can't have nested RCU reader without outer RCU reader. 1904 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 1905 if (oldmask & RCUTORTURE_RDR_RCU_1) 1906 mask &= ~RCUTORTURE_RDR_RCU_2; 1907 else 1908 mask |= RCUTORTURE_RDR_RCU_1; 1909 } 1910 1911 /* 1912 * Can't enable bh w/irq disabled. 1913 */ 1914 if (mask & RCUTORTURE_RDR_IRQ) 1915 mask |= oldmask & bhs; 1916 1917 /* 1918 * Ideally these sequences would be detected in debug builds 1919 * (regardless of RT), but until then don't stop testing 1920 * them on non-RT. 1921 */ 1922 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1923 /* Can't modify BH in atomic context */ 1924 if (oldmask & preempts_irq) 1925 mask &= ~bhs; 1926 if ((oldmask | mask) & preempts_irq) 1927 mask |= oldmask & bhs; 1928 } 1929 1930 return mask ?: RCUTORTURE_RDR_RCU_1; 1931 } 1932 1933 /* 1934 * Do a randomly selected number of extensions of an existing RCU read-side 1935 * critical section. 1936 */ 1937 static struct rt_read_seg * 1938 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1939 struct rt_read_seg *rtrsp) 1940 { 1941 int i; 1942 int j; 1943 int mask = rcutorture_extend_mask_max(); 1944 1945 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1946 if (!((mask - 1) & mask)) 1947 return rtrsp; /* Current RCU reader not extendable. */ 1948 /* Bias towards larger numbers of loops. */ 1949 i = torture_random(trsp); 1950 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1951 for (j = 0; j < i; j++) { 1952 mask = rcutorture_extend_mask(*readstate, trsp); 1953 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1954 } 1955 return &rtrsp[j]; 1956 } 1957 1958 /* 1959 * Do one read-side critical section, returning false if there was 1960 * no data to read. Can be invoked both from process context and 1961 * from a timer handler. 
1962 */ 1963 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1964 { 1965 bool checkpolling = !(torture_random(trsp) & 0xfff); 1966 unsigned long cookie; 1967 struct rcu_gp_oldstate cookie_full; 1968 int i; 1969 unsigned long started; 1970 unsigned long completed; 1971 int newstate; 1972 struct rcu_torture *p; 1973 int pipe_count; 1974 int readstate = 0; 1975 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1976 struct rt_read_seg *rtrsp = &rtseg[0]; 1977 struct rt_read_seg *rtrsp1; 1978 unsigned long long ts; 1979 1980 WARN_ON_ONCE(!rcu_is_watching()); 1981 newstate = rcutorture_extend_mask(readstate, trsp); 1982 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1983 if (checkpolling) { 1984 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1985 cookie = cur_ops->get_gp_state(); 1986 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 1987 cur_ops->get_gp_state_full(&cookie_full); 1988 } 1989 started = cur_ops->get_gp_seq(); 1990 ts = rcu_trace_clock_local(); 1991 p = rcu_dereference_check(rcu_torture_current, 1992 !cur_ops->readlock_held || cur_ops->readlock_held()); 1993 if (p == NULL) { 1994 /* Wait for rcu_torture_writer to get underway */ 1995 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1996 return false; 1997 } 1998 if (p->rtort_mbtest == 0) 1999 atomic_inc(&n_rcu_torture_mberror); 2000 rcu_torture_reader_do_mbchk(myid, p, trsp); 2001 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 2002 preempt_disable(); 2003 pipe_count = READ_ONCE(p->rtort_pipe_count); 2004 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2005 // Should not happen in a correct RCU implementation, 2006 // happens quite often for torture_type=busted. 2007 pipe_count = RCU_TORTURE_PIPE_LEN; 2008 } 2009 completed = cur_ops->get_gp_seq(); 2010 if (pipe_count > 1) { 2011 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 2012 ts, started, completed); 2013 rcu_ftrace_dump(DUMP_ALL); 2014 } 2015 __this_cpu_inc(rcu_torture_count[pipe_count]); 2016 completed = rcutorture_seq_diff(completed, started); 2017 if (completed > RCU_TORTURE_PIPE_LEN) { 2018 /* Should not happen, but... */ 2019 completed = RCU_TORTURE_PIPE_LEN; 2020 } 2021 __this_cpu_inc(rcu_torture_batch[completed]); 2022 preempt_enable(); 2023 if (checkpolling) { 2024 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2025 WARN_ONCE(cur_ops->poll_gp_state(cookie), 2026 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2027 __func__, 2028 rcu_torture_writer_state_getname(), 2029 rcu_torture_writer_state, 2030 cookie, cur_ops->get_gp_state()); 2031 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2032 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 2033 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2034 __func__, 2035 rcu_torture_writer_state_getname(), 2036 rcu_torture_writer_state, 2037 cpumask_pr_args(cpu_online_mask)); 2038 } 2039 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 2040 WARN_ON_ONCE(readstate); 2041 // This next splat is expected behavior if leakpointer, especially 2042 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2043 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 2044 2045 /* If error or close call, record the sequence of reader protections. 
*/ 2046 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2047 i = 0; 2048 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 2049 err_segs[i++] = *rtrsp1; 2050 rt_read_nsegs = i; 2051 } 2052 2053 return true; 2054 } 2055 2056 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2057 2058 /* 2059 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2060 * incrementing the corresponding element of the pipeline array. The 2061 * counter in the element should never be greater than 1, otherwise, the 2062 * RCU implementation is broken. 2063 */ 2064 static void rcu_torture_timer(struct timer_list *unused) 2065 { 2066 atomic_long_inc(&n_rcu_torture_timers); 2067 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2068 2069 /* Test call_rcu() invocation from interrupt handler. */ 2070 if (cur_ops->call) { 2071 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2072 2073 if (rhp) 2074 cur_ops->call(rhp, rcu_torture_timer_cb); 2075 } 2076 } 2077 2078 /* 2079 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2080 * incrementing the corresponding element of the pipeline array. The 2081 * counter in the element should never be greater than 1, otherwise, the 2082 * RCU implementation is broken. 2083 */ 2084 static int 2085 rcu_torture_reader(void *arg) 2086 { 2087 unsigned long lastsleep = jiffies; 2088 long myid = (long)arg; 2089 int mynumonline = myid; 2090 DEFINE_TORTURE_RANDOM(rand); 2091 struct timer_list t; 2092 2093 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2094 set_user_nice(current, MAX_NICE); 2095 if (irqreader && cur_ops->irq_capable) 2096 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2097 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2098 do { 2099 if (irqreader && cur_ops->irq_capable) { 2100 if (!timer_pending(&t)) 2101 mod_timer(&t, jiffies + 1); 2102 } 2103 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2104 schedule_timeout_interruptible(HZ); 2105 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2106 torture_hrtimeout_us(500, 1000, &rand); 2107 lastsleep = jiffies + 10; 2108 } 2109 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2110 schedule_timeout_interruptible(HZ / 5); 2111 stutter_wait("rcu_torture_reader"); 2112 } while (!torture_must_stop()); 2113 if (irqreader && cur_ops->irq_capable) { 2114 del_timer_sync(&t); 2115 destroy_timer_on_stack(&t); 2116 } 2117 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2118 torture_kthread_stopping("rcu_torture_reader"); 2119 return 0; 2120 } 2121 2122 /* 2123 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2124 * increase race probabilities and fuzzes the interval between toggling. 
2125 */ 2126 static int rcu_nocb_toggle(void *arg) 2127 { 2128 int cpu; 2129 int maxcpu = -1; 2130 int oldnice = task_nice(current); 2131 long r; 2132 DEFINE_TORTURE_RANDOM(rand); 2133 ktime_t toggle_delay; 2134 unsigned long toggle_fuzz; 2135 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2136 2137 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2138 while (!rcu_inkernel_boot_has_ended()) 2139 schedule_timeout_interruptible(HZ / 10); 2140 for_each_possible_cpu(cpu) 2141 maxcpu = cpu; 2142 WARN_ON(maxcpu < 0); 2143 if (toggle_interval > ULONG_MAX) 2144 toggle_fuzz = ULONG_MAX >> 3; 2145 else 2146 toggle_fuzz = toggle_interval >> 3; 2147 if (toggle_fuzz <= 0) 2148 toggle_fuzz = NSEC_PER_USEC; 2149 do { 2150 r = torture_random(&rand); 2151 cpu = (r >> 1) % (maxcpu + 1); 2152 if (r & 0x1) { 2153 rcu_nocb_cpu_offload(cpu); 2154 atomic_long_inc(&n_nocb_offload); 2155 } else { 2156 rcu_nocb_cpu_deoffload(cpu); 2157 atomic_long_inc(&n_nocb_deoffload); 2158 } 2159 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2160 set_current_state(TASK_INTERRUPTIBLE); 2161 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2162 if (stutter_wait("rcu_nocb_toggle")) 2163 sched_set_normal(current, oldnice); 2164 } while (!torture_must_stop()); 2165 torture_kthread_stopping("rcu_nocb_toggle"); 2166 return 0; 2167 } 2168 2169 /* 2170 * Print torture statistics. Caller must ensure that there is only 2171 * one call to this function at a given time!!! This is normally 2172 * accomplished by relying on the module system to only have one copy 2173 * of the module loaded, and then by giving the rcu_torture_stats 2174 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2175 * thread is not running). 2176 */ 2177 static void 2178 rcu_torture_stats_print(void) 2179 { 2180 int cpu; 2181 int i; 2182 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2183 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2184 struct rcu_torture *rtcp; 2185 static unsigned long rtcv_snap = ULONG_MAX; 2186 static bool splatted; 2187 struct task_struct *wtp; 2188 2189 for_each_possible_cpu(cpu) { 2190 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2191 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2192 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2193 } 2194 } 2195 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2196 if (pipesummary[i] != 0) 2197 break; 2198 } 2199 2200 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2201 rtcp = rcu_access_pointer(rcu_torture_current); 2202 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2203 rtcp, 2204 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2205 rcu_torture_current_version, 2206 list_empty(&rcu_torture_freelist), 2207 atomic_read(&n_rcu_torture_alloc), 2208 atomic_read(&n_rcu_torture_alloc_fail), 2209 atomic_read(&n_rcu_torture_free)); 2210 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2211 atomic_read(&n_rcu_torture_mberror), 2212 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2213 n_rcu_torture_barrier_error, 2214 n_rcu_torture_boost_ktrerror); 2215 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2216 n_rcu_torture_boost_failure, 2217 n_rcu_torture_boosts, 2218 atomic_long_read(&n_rcu_torture_timers)); 2219 torture_onoff_stats(); 2220 pr_cont("barrier: %ld/%ld:%ld ", 2221 data_race(n_barrier_successes), 2222 data_race(n_barrier_attempts), 2223 data_race(n_rcu_torture_barrier_error)); 2224 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2225 pr_cont("nocb-toggles: %ld:%ld\n", 2226 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2227 2228 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2229 if (atomic_read(&n_rcu_torture_mberror) || 2230 atomic_read(&n_rcu_torture_mbchk_fail) || 2231 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2232 n_rcu_torture_boost_failure || i > 1) { 2233 pr_cont("%s", "!!! "); 2234 atomic_inc(&n_rcu_torture_error); 2235 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2236 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2237 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2238 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2239 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2240 WARN_ON_ONCE(i > 1); // Too-short grace period 2241 } 2242 pr_cont("Reader Pipe: "); 2243 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2244 pr_cont(" %ld", pipesummary[i]); 2245 pr_cont("\n"); 2246 2247 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2248 pr_cont("Reader Batch: "); 2249 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2250 pr_cont(" %ld", batchsummary[i]); 2251 pr_cont("\n"); 2252 2253 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2254 pr_cont("Free-Block Circulation: "); 2255 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2256 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2257 } 2258 pr_cont("\n"); 2259 2260 if (cur_ops->stats) 2261 cur_ops->stats(); 2262 if (rtcv_snap == rcu_torture_current_version && 2263 rcu_access_pointer(rcu_torture_current) && 2264 !rcu_stall_is_suppressed()) { 2265 int __maybe_unused flags = 0; 2266 unsigned long __maybe_unused gp_seq = 0; 2267 2268 if (cur_ops->get_gp_data) 2269 cur_ops->get_gp_data(&flags, &gp_seq); 2270 wtp = READ_ONCE(writer_task); 2271 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2272 rcu_torture_writer_state_getname(), 2273 rcu_torture_writer_state, gp_seq, flags, 2274 wtp == NULL ? ~0U : wtp->__state, 2275 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2276 if (!splatted && wtp) { 2277 sched_show_task(wtp); 2278 splatted = true; 2279 } 2280 if (cur_ops->gp_kthread_dbg) 2281 cur_ops->gp_kthread_dbg(); 2282 rcu_ftrace_dump(DUMP_ALL); 2283 } 2284 rtcv_snap = rcu_torture_current_version; 2285 } 2286 2287 /* 2288 * Periodically prints torture statistics, if periodic statistics printing 2289 * was specified via the stat_interval module parameter. 
2290 */ 2291 static int 2292 rcu_torture_stats(void *arg) 2293 { 2294 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2295 do { 2296 schedule_timeout_interruptible(stat_interval * HZ); 2297 rcu_torture_stats_print(); 2298 torture_shutdown_absorb("rcu_torture_stats"); 2299 } while (!torture_must_stop()); 2300 torture_kthread_stopping("rcu_torture_stats"); 2301 return 0; 2302 } 2303 2304 /* Test mem_dump_obj() and friends. */ 2305 static void rcu_torture_mem_dump_obj(void) 2306 { 2307 struct rcu_head *rhp; 2308 struct kmem_cache *kcp; 2309 static int z; 2310 2311 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2312 if (WARN_ON_ONCE(!kcp)) 2313 return; 2314 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2315 if (WARN_ON_ONCE(!rhp)) { 2316 kmem_cache_destroy(kcp); 2317 return; 2318 } 2319 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2320 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2321 mem_dump_obj(ZERO_SIZE_PTR); 2322 pr_alert("mem_dump_obj(NULL):"); 2323 mem_dump_obj(NULL); 2324 pr_alert("mem_dump_obj(%px):", &rhp); 2325 mem_dump_obj(&rhp); 2326 pr_alert("mem_dump_obj(%px):", rhp); 2327 mem_dump_obj(rhp); 2328 pr_alert("mem_dump_obj(%px):", &rhp->func); 2329 mem_dump_obj(&rhp->func); 2330 pr_alert("mem_dump_obj(%px):", &z); 2331 mem_dump_obj(&z); 2332 kmem_cache_free(kcp, rhp); 2333 kmem_cache_destroy(kcp); 2334 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2335 if (WARN_ON_ONCE(!rhp)) 2336 return; 2337 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2338 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2339 mem_dump_obj(rhp); 2340 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2341 mem_dump_obj(&rhp->func); 2342 kfree(rhp); 2343 rhp = vmalloc(4096); 2344 if (WARN_ON_ONCE(!rhp)) 2345 return; 2346 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2347 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2348 mem_dump_obj(rhp); 2349 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2350 mem_dump_obj(&rhp->func); 2351 vfree(rhp); 2352 } 2353 2354 static void 2355 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2356 { 2357 pr_alert("%s" TORTURE_FLAG 2358 "--- %s: nreaders=%d nfakewriters=%d " 2359 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2360 "shuffle_interval=%d stutter=%d irqreader=%d " 2361 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2362 "test_boost=%d/%d test_boost_interval=%d " 2363 "test_boost_duration=%d shutdown_secs=%d " 2364 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2365 "stall_cpu_block=%d " 2366 "n_barrier_cbs=%d " 2367 "onoff_interval=%d onoff_holdoff=%d " 2368 "read_exit_delay=%d read_exit_burst=%d " 2369 "nocbs_nthreads=%d nocbs_toggle=%d " 2370 "test_nmis=%d\n", 2371 torture_type, tag, nrealreaders, nfakewriters, 2372 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2373 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2374 test_boost, cur_ops->can_boost, 2375 test_boost_interval, test_boost_duration, shutdown_secs, 2376 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2377 stall_cpu_block, 2378 n_barrier_cbs, 2379 onoff_interval, onoff_holdoff, 2380 read_exit_delay, read_exit_burst, 2381 nocbs_nthreads, nocbs_toggle, 2382 test_nmis); 2383 } 2384 2385 static int rcutorture_booster_cleanup(unsigned int cpu) 2386 { 2387 struct task_struct *t; 2388 2389 if (boost_tasks[cpu] == NULL) 
2390 return 0; 2391 mutex_lock(&boost_mutex); 2392 t = boost_tasks[cpu]; 2393 boost_tasks[cpu] = NULL; 2394 rcu_torture_enable_rt_throttle(); 2395 mutex_unlock(&boost_mutex); 2396 2397 /* This must be outside of the mutex, otherwise deadlock! */ 2398 torture_stop_kthread(rcu_torture_boost, t); 2399 return 0; 2400 } 2401 2402 static int rcutorture_booster_init(unsigned int cpu) 2403 { 2404 int retval; 2405 2406 if (boost_tasks[cpu] != NULL) 2407 return 0; /* Already created, nothing more to do. */ 2408 2409 // Testing RCU priority boosting requires rcutorture do 2410 // some serious abuse. Counter this by running ksoftirqd 2411 // at higher priority. 2412 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2413 struct sched_param sp; 2414 struct task_struct *t; 2415 2416 t = per_cpu(ksoftirqd, cpu); 2417 WARN_ON_ONCE(!t); 2418 sp.sched_priority = 2; 2419 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2420 } 2421 2422 /* Don't allow time recalculation while creating a new task. */ 2423 mutex_lock(&boost_mutex); 2424 rcu_torture_disable_rt_throttle(); 2425 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2426 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2427 cpu, "rcu_torture_boost_%u"); 2428 if (IS_ERR(boost_tasks[cpu])) { 2429 retval = PTR_ERR(boost_tasks[cpu]); 2430 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2431 n_rcu_torture_boost_ktrerror++; 2432 boost_tasks[cpu] = NULL; 2433 mutex_unlock(&boost_mutex); 2434 return retval; 2435 } 2436 mutex_unlock(&boost_mutex); 2437 return 0; 2438 } 2439 2440 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 2441 { 2442 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 2443 return NOTIFY_OK; 2444 } 2445 2446 static struct notifier_block rcu_torture_stall_block = { 2447 .notifier_call = rcu_torture_stall_nf, 2448 }; 2449 2450 /* 2451 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2452 * induces a CPU stall for the time specified by stall_cpu. If a new 2453 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 2454 */ 2455 static int rcu_torture_stall(void *args) 2456 { 2457 int idx; 2458 int ret; 2459 unsigned long stop_at; 2460 2461 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2462 if (rcu_cpu_stall_notifiers) { 2463 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 2464 if (ret) 2465 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 2466 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 2467 } 2468 if (stall_cpu_holdoff > 0) { 2469 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2470 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2471 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2472 } 2473 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2474 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2475 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2476 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2477 if (kthread_should_stop()) 2478 break; 2479 schedule_timeout_uninterruptible(HZ); 2480 } 2481 } 2482 if (!kthread_should_stop() && stall_cpu > 0) { 2483 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2484 stop_at = ktime_get_seconds() + stall_cpu; 2485 /* RCU CPU stall is expected behavior in following code. 
*/ 2486 idx = cur_ops->readlock(); 2487 if (stall_cpu_irqsoff) 2488 local_irq_disable(); 2489 else if (!stall_cpu_block) 2490 preempt_disable(); 2491 pr_alert("%s start on CPU %d.\n", 2492 __func__, raw_smp_processor_id()); 2493 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && 2494 !kthread_should_stop()) 2495 if (stall_cpu_block) { 2496 #ifdef CONFIG_PREEMPTION 2497 preempt_schedule(); 2498 #else 2499 schedule_timeout_uninterruptible(HZ); 2500 #endif 2501 } else if (stall_no_softlockup) { 2502 touch_softlockup_watchdog(); 2503 } 2504 if (stall_cpu_irqsoff) 2505 local_irq_enable(); 2506 else if (!stall_cpu_block) 2507 preempt_enable(); 2508 cur_ops->readunlock(idx); 2509 } 2510 pr_alert("%s end.\n", __func__); 2511 if (rcu_cpu_stall_notifiers && !ret) { 2512 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 2513 if (ret) 2514 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 2515 } 2516 torture_shutdown_absorb("rcu_torture_stall"); 2517 while (!kthread_should_stop()) 2518 schedule_timeout_interruptible(10 * HZ); 2519 return 0; 2520 } 2521 2522 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2523 static int __init rcu_torture_stall_init(void) 2524 { 2525 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2526 return 0; 2527 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2528 } 2529 2530 /* State structure for forward-progress self-propagating RCU callback. */ 2531 struct fwd_cb_state { 2532 struct rcu_head rh; 2533 int stop; 2534 }; 2535 2536 /* 2537 * Forward-progress self-propagating RCU callback function. Because 2538 * callbacks run from softirq, this function is an implicit RCU read-side 2539 * critical section. 2540 */ 2541 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2542 { 2543 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2544 2545 if (READ_ONCE(fcsp->stop)) { 2546 WRITE_ONCE(fcsp->stop, 2); 2547 return; 2548 } 2549 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2550 } 2551 2552 /* State for continuous-flood RCU callbacks. */ 2553 struct rcu_fwd_cb { 2554 struct rcu_head rh; 2555 struct rcu_fwd_cb *rfc_next; 2556 struct rcu_fwd *rfc_rfp; 2557 int rfc_gps; 2558 }; 2559 2560 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2561 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2562 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2563 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 2564 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2565 2566 struct rcu_launder_hist { 2567 long n_launders; 2568 unsigned long launder_gp_seq; 2569 }; 2570 2571 struct rcu_fwd { 2572 spinlock_t rcu_fwd_lock; 2573 struct rcu_fwd_cb *rcu_fwd_cb_head; 2574 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2575 long n_launders_cb; 2576 unsigned long rcu_fwd_startat; 2577 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2578 unsigned long rcu_launder_gp_seq_start; 2579 int rcu_fwd_id; 2580 }; 2581 2582 static DEFINE_MUTEX(rcu_fwd_mutex); 2583 static struct rcu_fwd *rcu_fwds; 2584 static unsigned long rcu_fwd_seq; 2585 static atomic_long_t rcu_fwd_max_cbs; 2586 static bool rcu_fwd_emergency_stop; 2587 2588 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2589 { 2590 unsigned long gps; 2591 unsigned long gps_old; 2592 int i; 2593 int j; 2594 2595 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2596 if (rfp->n_launders_hist[i].n_launders > 0) 2597 break; 2598 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2599 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2600 gps_old = rfp->rcu_launder_gp_seq_start; 2601 for (j = 0; j <= i; j++) { 2602 gps = rfp->n_launders_hist[j].launder_gp_seq; 2603 pr_cont(" %ds/%d: %ld:%ld", 2604 j + 1, FWD_CBS_HIST_DIV, 2605 rfp->n_launders_hist[j].n_launders, 2606 rcutorture_seq_diff(gps, gps_old)); 2607 gps_old = gps; 2608 } 2609 pr_cont("\n"); 2610 } 2611 2612 /* Callback function for continuous-flood RCU callbacks. */ 2613 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2614 { 2615 unsigned long flags; 2616 int i; 2617 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2618 struct rcu_fwd_cb **rfcpp; 2619 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2620 2621 rfcp->rfc_next = NULL; 2622 rfcp->rfc_gps++; 2623 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2624 rfcpp = rfp->rcu_fwd_cb_tail; 2625 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2626 smp_store_release(rfcpp, rfcp); 2627 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2628 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2629 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2630 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2631 rfp->n_launders_hist[i].n_launders++; 2632 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2633 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2634 } 2635 2636 // Give the scheduler a chance, even on nohz_full CPUs. 2637 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2638 { 2639 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2640 // Real call_rcu() floods hit userspace, so emulate that. 2641 if (need_resched() || (iter & 0xfff)) 2642 schedule(); 2643 return; 2644 } 2645 // No userspace emulation: CB invocation throttles call_rcu() 2646 cond_resched(); 2647 } 2648 2649 /* 2650 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2651 * test is over or because we hit an OOM event. 
2652 */ 2653 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2654 { 2655 unsigned long flags; 2656 unsigned long freed = 0; 2657 struct rcu_fwd_cb *rfcp; 2658 2659 for (;;) { 2660 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2661 rfcp = rfp->rcu_fwd_cb_head; 2662 if (!rfcp) { 2663 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2664 break; 2665 } 2666 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2667 if (!rfp->rcu_fwd_cb_head) 2668 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2669 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2670 kfree(rfcp); 2671 freed++; 2672 rcu_torture_fwd_prog_cond_resched(freed); 2673 if (tick_nohz_full_enabled()) { 2674 local_irq_save(flags); 2675 rcu_momentary_dyntick_idle(); 2676 local_irq_restore(flags); 2677 } 2678 } 2679 return freed; 2680 } 2681 2682 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2683 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2684 int *tested, int *tested_tries) 2685 { 2686 unsigned long cver; 2687 unsigned long dur; 2688 struct fwd_cb_state fcs; 2689 unsigned long gps; 2690 int idx; 2691 int sd; 2692 int sd4; 2693 bool selfpropcb = false; 2694 unsigned long stopat; 2695 static DEFINE_TORTURE_RANDOM(trs); 2696 2697 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2698 if (!cur_ops->sync) 2699 return; // Cannot do need_resched() forward progress testing without ->sync. 2700 if (cur_ops->call && cur_ops->cb_barrier) { 2701 init_rcu_head_on_stack(&fcs.rh); 2702 selfpropcb = true; 2703 } 2704 2705 /* Tight loop containing cond_resched(). */ 2706 atomic_inc(&rcu_fwd_cb_nodelay); 2707 cur_ops->sync(); /* Later readers see above write. */ 2708 if (selfpropcb) { 2709 WRITE_ONCE(fcs.stop, 0); 2710 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2711 } 2712 cver = READ_ONCE(rcu_torture_current_version); 2713 gps = cur_ops->get_gp_seq(); 2714 sd = cur_ops->stall_dur() + 1; 2715 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2716 dur = sd4 + torture_random(&trs) % (sd - sd4); 2717 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2718 stopat = rfp->rcu_fwd_startat + dur; 2719 while (time_before(jiffies, stopat) && 2720 !shutdown_time_arrived() && 2721 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2722 idx = cur_ops->readlock(); 2723 udelay(10); 2724 cur_ops->readunlock(idx); 2725 if (!fwd_progress_need_resched || need_resched()) 2726 cond_resched(); 2727 } 2728 (*tested_tries)++; 2729 if (!time_before(jiffies, stopat) && 2730 !shutdown_time_arrived() && 2731 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2732 (*tested)++; 2733 cver = READ_ONCE(rcu_torture_current_version) - cver; 2734 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2735 WARN_ON(!cver && gps < 2); 2736 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2737 rfp->rcu_fwd_id, dur, cver, gps); 2738 } 2739 if (selfpropcb) { 2740 WRITE_ONCE(fcs.stop, 1); 2741 cur_ops->sync(); /* Wait for running CB to complete. */ 2742 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2743 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2744 } 2745 2746 if (selfpropcb) { 2747 WARN_ON(READ_ONCE(fcs.stop) != 2); 2748 destroy_rcu_head_on_stack(&fcs.rh); 2749 } 2750 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2751 atomic_dec(&rcu_fwd_cb_nodelay); 2752 } 2753 2754 /* Carry out call_rcu() forward-progress testing. 
*/ 2755 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2756 { 2757 unsigned long cver; 2758 unsigned long flags; 2759 unsigned long gps; 2760 int i; 2761 long n_launders; 2762 long n_launders_cb_snap; 2763 long n_launders_sa; 2764 long n_max_cbs; 2765 long n_max_gps; 2766 struct rcu_fwd_cb *rfcp; 2767 struct rcu_fwd_cb *rfcpn; 2768 unsigned long stopat; 2769 unsigned long stoppedat; 2770 2771 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2772 if (READ_ONCE(rcu_fwd_emergency_stop)) 2773 return; /* Get out of the way quickly, no GP wait! */ 2774 if (!cur_ops->call) 2775 return; /* Can't do call_rcu() fwd prog without ->call. */ 2776 2777 /* Loop continuously posting RCU callbacks. */ 2778 atomic_inc(&rcu_fwd_cb_nodelay); 2779 cur_ops->sync(); /* Later readers see above write. */ 2780 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2781 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2782 n_launders = 0; 2783 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2784 n_launders_sa = 0; 2785 n_max_cbs = 0; 2786 n_max_gps = 0; 2787 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2788 rfp->n_launders_hist[i].n_launders = 0; 2789 cver = READ_ONCE(rcu_torture_current_version); 2790 gps = cur_ops->get_gp_seq(); 2791 rfp->rcu_launder_gp_seq_start = gps; 2792 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2793 while (time_before(jiffies, stopat) && 2794 !shutdown_time_arrived() && 2795 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2796 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2797 rfcpn = NULL; 2798 if (rfcp) 2799 rfcpn = READ_ONCE(rfcp->rfc_next); 2800 if (rfcpn) { 2801 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2802 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2803 break; 2804 rfp->rcu_fwd_cb_head = rfcpn; 2805 n_launders++; 2806 n_launders_sa++; 2807 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2808 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2809 if (WARN_ON_ONCE(!rfcp)) { 2810 schedule_timeout_interruptible(1); 2811 continue; 2812 } 2813 n_max_cbs++; 2814 n_launders_sa = 0; 2815 rfcp->rfc_gps = 0; 2816 rfcp->rfc_rfp = rfp; 2817 } else { 2818 rfcp = NULL; 2819 } 2820 if (rfcp) 2821 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2822 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2823 if (tick_nohz_full_enabled()) { 2824 local_irq_save(flags); 2825 rcu_momentary_dyntick_idle(); 2826 local_irq_restore(flags); 2827 } 2828 } 2829 stoppedat = jiffies; 2830 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2831 cver = READ_ONCE(rcu_torture_current_version) - cver; 2832 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2833 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2834 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. 
*/ 2835 (void)rcu_torture_fwd_prog_cbfree(rfp); 2836 2837 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2838 !shutdown_time_arrived()) { 2839 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) 2840 cur_ops->gp_kthread_dbg(); 2841 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n", 2842 __func__, 2843 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2844 n_launders + n_max_cbs - n_launders_cb_snap, 2845 n_launders, n_launders_sa, 2846 n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); 2847 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2848 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 2849 rcu_torture_fwd_cb_hist(rfp); 2850 mutex_unlock(&rcu_fwd_mutex); 2851 } 2852 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2853 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2854 atomic_dec(&rcu_fwd_cb_nodelay); 2855 } 2856 2857 2858 /* 2859 * OOM notifier, but this only prints diagnostic information for the 2860 * current forward-progress test. 2861 */ 2862 static int rcutorture_oom_notify(struct notifier_block *self, 2863 unsigned long notused, void *nfreed) 2864 { 2865 int i; 2866 long ncbs; 2867 struct rcu_fwd *rfp; 2868 2869 mutex_lock(&rcu_fwd_mutex); 2870 rfp = rcu_fwds; 2871 if (!rfp) { 2872 mutex_unlock(&rcu_fwd_mutex); 2873 return NOTIFY_OK; 2874 } 2875 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2876 __func__); 2877 for (i = 0; i < fwd_progress; i++) { 2878 rcu_torture_fwd_cb_hist(&rfp[i]); 2879 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2880 } 2881 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2882 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2883 ncbs = 0; 2884 for (i = 0; i < fwd_progress; i++) 2885 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2886 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2887 cur_ops->cb_barrier(); 2888 ncbs = 0; 2889 for (i = 0; i < fwd_progress; i++) 2890 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2891 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2892 cur_ops->cb_barrier(); 2893 ncbs = 0; 2894 for (i = 0; i < fwd_progress; i++) 2895 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2896 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2897 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2898 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2899 pr_info("%s returning after OOM processing.\n", __func__); 2900 mutex_unlock(&rcu_fwd_mutex); 2901 return NOTIFY_OK; 2902 } 2903 2904 static struct notifier_block rcutorture_oom_nb = { 2905 .notifier_call = rcutorture_oom_notify 2906 }; 2907 2908 /* Carry out grace-period forward-progress testing. 
*/ 2909 static int rcu_torture_fwd_prog(void *args) 2910 { 2911 bool firsttime = true; 2912 long max_cbs; 2913 int oldnice = task_nice(current); 2914 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2915 struct rcu_fwd *rfp = args; 2916 int tested = 0; 2917 int tested_tries = 0; 2918 2919 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2920 rcu_bind_current_to_nocb(); 2921 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2922 set_user_nice(current, MAX_NICE); 2923 do { 2924 if (!rfp->rcu_fwd_id) { 2925 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2926 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2927 if (!firsttime) { 2928 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2929 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2930 } 2931 firsttime = false; 2932 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2933 } else { 2934 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2935 schedule_timeout_interruptible(HZ / 20); 2936 oldseq = READ_ONCE(rcu_fwd_seq); 2937 } 2938 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2939 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2940 rcu_torture_fwd_prog_cr(rfp); 2941 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2942 (!IS_ENABLED(CONFIG_TINY_RCU) || 2943 (rcu_inkernel_boot_has_ended() && 2944 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2945 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2946 2947 /* Avoid slow periods, better to test when busy. */ 2948 if (stutter_wait("rcu_torture_fwd_prog")) 2949 sched_set_normal(current, oldnice); 2950 } while (!torture_must_stop()); 2951 /* Short runs might not contain a valid forward-progress attempt. */ 2952 if (!rfp->rcu_fwd_id) { 2953 WARN_ON(!tested && tested_tries >= 5); 2954 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2955 } 2956 torture_kthread_stopping("rcu_torture_fwd_prog"); 2957 return 0; 2958 } 2959 2960 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2961 static int __init rcu_torture_fwd_prog_init(void) 2962 { 2963 int i; 2964 int ret = 0; 2965 struct rcu_fwd *rfp; 2966 2967 if (!fwd_progress) 2968 return 0; /* Not requested, so don't do it. */ 2969 if (fwd_progress >= nr_cpu_ids) { 2970 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 2971 fwd_progress = nr_cpu_ids; 2972 } else if (fwd_progress < 0) { 2973 fwd_progress = nr_cpu_ids; 2974 } 2975 if ((!cur_ops->sync && !cur_ops->call) || 2976 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2977 cur_ops == &rcu_busted_ops) { 2978 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2979 fwd_progress = 0; 2980 return 0; 2981 } 2982 if (stall_cpu > 0) { 2983 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2984 fwd_progress = 0; 2985 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2986 return -EINVAL; /* In module, can fail back to user. */ 2987 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 2988 return 0; 2989 } 2990 if (fwd_progress_holdoff <= 0) 2991 fwd_progress_holdoff = 1; 2992 if (fwd_progress_div <= 0) 2993 fwd_progress_div = 4; 2994 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 2995 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 2996 if (!rfp || !fwd_prog_tasks) { 2997 kfree(rfp); 2998 kfree(fwd_prog_tasks); 2999 fwd_prog_tasks = NULL; 3000 fwd_progress = 0; 3001 return -ENOMEM; 3002 } 3003 for (i = 0; i < fwd_progress; i++) { 3004 spin_lock_init(&rfp[i].rcu_fwd_lock); 3005 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3006 rfp[i].rcu_fwd_id = i; 3007 } 3008 mutex_lock(&rcu_fwd_mutex); 3009 rcu_fwds = rfp; 3010 mutex_unlock(&rcu_fwd_mutex); 3011 register_oom_notifier(&rcutorture_oom_nb); 3012 for (i = 0; i < fwd_progress; i++) { 3013 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3014 if (ret) { 3015 fwd_progress = i; 3016 return ret; 3017 } 3018 } 3019 return 0; 3020 } 3021 3022 static void rcu_torture_fwd_prog_cleanup(void) 3023 { 3024 int i; 3025 struct rcu_fwd *rfp; 3026 3027 if (!rcu_fwds || !fwd_prog_tasks) 3028 return; 3029 for (i = 0; i < fwd_progress; i++) 3030 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3031 unregister_oom_notifier(&rcutorture_oom_nb); 3032 mutex_lock(&rcu_fwd_mutex); 3033 rfp = rcu_fwds; 3034 rcu_fwds = NULL; 3035 mutex_unlock(&rcu_fwd_mutex); 3036 kfree(rfp); 3037 kfree(fwd_prog_tasks); 3038 fwd_prog_tasks = NULL; 3039 } 3040 3041 /* Callback function for RCU barrier testing. */ 3042 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3043 { 3044 atomic_inc(&barrier_cbs_invoked); 3045 } 3046 3047 /* IPI handler to get callback posted on desired CPU, if online. */ 3048 static int rcu_torture_barrier1cb(void *rcu_void) 3049 { 3050 struct rcu_head *rhp = rcu_void; 3051 3052 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3053 return 0; 3054 } 3055 3056 /* kthread function to register callbacks used to test RCU barriers. */ 3057 static int rcu_torture_barrier_cbs(void *arg) 3058 { 3059 long myid = (long)arg; 3060 bool lastphase = false; 3061 bool newphase; 3062 struct rcu_head rcu; 3063 3064 init_rcu_head_on_stack(&rcu); 3065 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3066 set_user_nice(current, MAX_NICE); 3067 do { 3068 wait_event(barrier_cbs_wq[myid], 3069 (newphase = 3070 smp_load_acquire(&barrier_phase)) != lastphase || 3071 torture_must_stop()); 3072 lastphase = newphase; 3073 if (torture_must_stop()) 3074 break; 3075 /* 3076 * The above smp_load_acquire() ensures barrier_phase load 3077 * is ordered before the following ->call(). 3078 */ 3079 if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1)) 3080 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3081 3082 if (atomic_dec_and_test(&barrier_cbs_count)) 3083 wake_up(&barrier_wq); 3084 } while (!torture_must_stop()); 3085 if (cur_ops->cb_barrier != NULL) 3086 cur_ops->cb_barrier(); 3087 destroy_rcu_head_on_stack(&rcu); 3088 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3089 return 0; 3090 } 3091 3092 /* kthread function to drive and coordinate RCU barrier testing. */ 3093 static int rcu_torture_barrier(void *arg) 3094 { 3095 int i; 3096 3097 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3098 do { 3099 atomic_set(&barrier_cbs_invoked, 0); 3100 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3101 /* Ensure barrier_phase ordered after prior assignments. 
*/ 3102 smp_store_release(&barrier_phase, !barrier_phase); 3103 for (i = 0; i < n_barrier_cbs; i++) 3104 wake_up(&barrier_cbs_wq[i]); 3105 wait_event(barrier_wq, 3106 atomic_read(&barrier_cbs_count) == 0 || 3107 torture_must_stop()); 3108 if (torture_must_stop()) 3109 break; 3110 n_barrier_attempts++; 3111 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3112 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3113 n_rcu_torture_barrier_error++; 3114 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3115 atomic_read(&barrier_cbs_invoked), 3116 n_barrier_cbs); 3117 WARN_ON(1); 3118 // Wait manually for the remaining callbacks 3119 i = 0; 3120 do { 3121 if (WARN_ON(i++ > HZ)) 3122 i = INT_MIN; 3123 schedule_timeout_interruptible(1); 3124 cur_ops->cb_barrier(); 3125 } while (atomic_read(&barrier_cbs_invoked) != 3126 n_barrier_cbs && 3127 !torture_must_stop()); 3128 smp_mb(); // Can't trust ordering if broken. 3129 if (!torture_must_stop()) 3130 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3131 atomic_read(&barrier_cbs_invoked)); 3132 } else { 3133 n_barrier_successes++; 3134 } 3135 schedule_timeout_interruptible(HZ / 10); 3136 } while (!torture_must_stop()); 3137 torture_kthread_stopping("rcu_torture_barrier"); 3138 return 0; 3139 } 3140 3141 /* Initialize RCU barrier testing. */ 3142 static int rcu_torture_barrier_init(void) 3143 { 3144 int i; 3145 int ret; 3146 3147 if (n_barrier_cbs <= 0) 3148 return 0; 3149 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3150 pr_alert("%s" TORTURE_FLAG 3151 " Call or barrier ops missing for %s,\n", 3152 torture_type, cur_ops->name); 3153 pr_alert("%s" TORTURE_FLAG 3154 " RCU barrier testing omitted from run.\n", 3155 torture_type); 3156 return 0; 3157 } 3158 atomic_set(&barrier_cbs_count, 0); 3159 atomic_set(&barrier_cbs_invoked, 0); 3160 barrier_cbs_tasks = 3161 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3162 GFP_KERNEL); 3163 barrier_cbs_wq = 3164 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3165 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3166 return -ENOMEM; 3167 for (i = 0; i < n_barrier_cbs; i++) { 3168 init_waitqueue_head(&barrier_cbs_wq[i]); 3169 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3170 (void *)(long)i, 3171 barrier_cbs_tasks[i]); 3172 if (ret) 3173 return ret; 3174 } 3175 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3176 } 3177 3178 /* Clean up after RCU barrier testing. */ 3179 static void rcu_torture_barrier_cleanup(void) 3180 { 3181 int i; 3182 3183 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3184 if (barrier_cbs_tasks != NULL) { 3185 for (i = 0; i < n_barrier_cbs; i++) 3186 torture_stop_kthread(rcu_torture_barrier_cbs, 3187 barrier_cbs_tasks[i]); 3188 kfree(barrier_cbs_tasks); 3189 barrier_cbs_tasks = NULL; 3190 } 3191 if (barrier_cbs_wq != NULL) { 3192 kfree(barrier_cbs_wq); 3193 barrier_cbs_wq = NULL; 3194 } 3195 } 3196 3197 static bool rcu_torture_can_boost(void) 3198 { 3199 static int boost_warn_once; 3200 int prio; 3201 3202 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3203 return false; 3204 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3205 return false; 3206 3207 prio = rcu_get_gp_kthreads_prio(); 3208 if (!prio) 3209 return false; 3210 3211 if (prio < 2) { 3212 if (boost_warn_once == 1) 3213 return false; 3214 3215 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3216 boost_warn_once = 1; 3217 return false; 3218 } 3219 3220 return true; 3221 } 3222 3223 static bool read_exit_child_stop; 3224 static bool read_exit_child_stopped; 3225 static wait_queue_head_t read_exit_wq; 3226 3227 // Child kthread which just does an rcutorture reader and exits. 3228 static int rcu_torture_read_exit_child(void *trsp_in) 3229 { 3230 struct torture_random_state *trsp = trsp_in; 3231 3232 set_user_nice(current, MAX_NICE); 3233 // Minimize time between reading and exiting. 3234 while (!kthread_should_stop()) 3235 schedule_timeout_uninterruptible(HZ / 20); 3236 (void)rcu_torture_one_read(trsp, -1); 3237 return 0; 3238 } 3239 3240 // Parent kthread which creates and destroys read-exit child kthreads. 3241 static int rcu_torture_read_exit(void *unused) 3242 { 3243 bool errexit = false; 3244 int i; 3245 struct task_struct *tsp; 3246 DEFINE_TORTURE_RANDOM(trs); 3247 3248 // Allocate and initialize. 3249 set_user_nice(current, MAX_NICE); 3250 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3251 3252 // Each pass through this loop does one read-exit episode. 3253 do { 3254 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3255 for (i = 0; i < read_exit_burst; i++) { 3256 if (READ_ONCE(read_exit_child_stop)) 3257 break; 3258 stutter_wait("rcu_torture_read_exit"); 3259 // Spawn child. 3260 tsp = kthread_run(rcu_torture_read_exit_child, 3261 &trs, "%s", "rcu_torture_read_exit_child"); 3262 if (IS_ERR(tsp)) { 3263 TOROUT_ERRSTRING("out of memory"); 3264 errexit = true; 3265 break; 3266 } 3267 cond_resched(); 3268 kthread_stop(tsp); 3269 n_read_exits++; 3270 } 3271 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3272 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3273 i = 0; 3274 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3275 schedule_timeout_uninterruptible(HZ); 3276 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3277 3278 // Clean up and exit. 3279 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3280 smp_mb(); // Store before wakeup. 3281 wake_up(&read_exit_wq); 3282 while (!torture_must_stop()) 3283 schedule_timeout_uninterruptible(HZ / 20); 3284 torture_kthread_stopping("rcu_torture_read_exit"); 3285 return 0; 3286 } 3287 3288 static int rcu_torture_read_exit_init(void) 3289 { 3290 if (read_exit_burst <= 0) 3291 return 0; 3292 init_waitqueue_head(&read_exit_wq); 3293 read_exit_child_stop = false; 3294 read_exit_child_stopped = false; 3295 return torture_create_kthread(rcu_torture_read_exit, NULL, 3296 read_exit_task); 3297 } 3298 3299 static void rcu_torture_read_exit_cleanup(void) 3300 { 3301 if (!read_exit_task) 3302 return; 3303 WRITE_ONCE(read_exit_child_stop, true); 3304 smp_mb(); // Above write before wait. 
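	// (Added descriptive comment.)  rcu_torture_read_exit() sees the
	// read_exit_child_stop flag, finishes its current episode, sets
	// read_exit_child_stopped, and then wakes up the wait_event() below.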
3305 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3306 torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
3307 }
3308 
3309 static void rcutorture_test_nmis(int n)
3310 {
3311 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3312 int cpu;
3313 int dumpcpu;
3314 int i;
3315 
3316 for (i = 0; i < n; i++) {
3317 preempt_disable();
3318 cpu = smp_processor_id();
3319 dumpcpu = cpu + 1;
3320 if (dumpcpu >= nr_cpu_ids)
3321 dumpcpu = 0;
3322 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
3323 dump_cpu_task(dumpcpu);
3324 preempt_enable();
3325 schedule_timeout_uninterruptible(15 * HZ);
3326 }
3327 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3328 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
3329 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3330 }
3331 
3332 static enum cpuhp_state rcutor_hp;
3333 
3334 static void
3335 rcu_torture_cleanup(void)
3336 {
3337 int firsttime;
3338 int flags = 0;
3339 unsigned long gp_seq = 0;
3340 int i;
3341 
3342 if (torture_cleanup_begin()) {
3343 if (cur_ops->cb_barrier != NULL) {
3344 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3345 cur_ops->cb_barrier();
3346 }
3347 if (cur_ops->gp_slow_unregister)
3348 cur_ops->gp_slow_unregister(NULL);
3349 return;
3350 }
3351 if (!cur_ops) {
3352 torture_cleanup_end();
3353 return;
3354 }
3355 
3356 rcutorture_test_nmis(test_nmis);
3357 
3358 if (cur_ops->gp_kthread_dbg)
3359 cur_ops->gp_kthread_dbg();
3360 rcu_torture_read_exit_cleanup();
3361 rcu_torture_barrier_cleanup();
3362 rcu_torture_fwd_prog_cleanup();
3363 torture_stop_kthread(rcu_torture_stall, stall_task);
3364 torture_stop_kthread(rcu_torture_writer, writer_task);
3365 
3366 if (nocb_tasks) {
3367 for (i = 0; i < nrealnocbers; i++)
3368 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
3369 kfree(nocb_tasks);
3370 nocb_tasks = NULL;
3371 }
3372 
3373 if (reader_tasks) {
3374 for (i = 0; i < nrealreaders; i++)
3375 torture_stop_kthread(rcu_torture_reader,
3376 reader_tasks[i]);
3377 kfree(reader_tasks);
3378 reader_tasks = NULL;
3379 }
3380 kfree(rcu_torture_reader_mbchk);
3381 rcu_torture_reader_mbchk = NULL;
3382 
3383 if (fakewriter_tasks) {
3384 for (i = 0; i < nfakewriters; i++)
3385 torture_stop_kthread(rcu_torture_fakewriter,
3386 fakewriter_tasks[i]);
3387 kfree(fakewriter_tasks);
3388 fakewriter_tasks = NULL;
3389 }
3390 
3391 if (cur_ops->get_gp_data)
3392 cur_ops->get_gp_data(&flags, &gp_seq);
3393 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
3394 cur_ops->name, (long)gp_seq, flags,
3395 rcutorture_seq_diff(gp_seq, start_gp_seq));
3396 torture_stop_kthread(rcu_torture_stats, stats_task);
3397 torture_stop_kthread(rcu_torture_fqs, fqs_task);
3398 if (rcu_torture_can_boost() && rcutor_hp >= 0)
3399 cpuhp_remove_state(rcutor_hp);
3400 
3401 /*
3402 * Wait for all RCU callbacks to fire, then do torture-type-specific
3403 * cleanup operations.
3404 */
3405 if (cur_ops->cb_barrier != NULL) {
3406 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3407 cur_ops->cb_barrier();
3408 }
3409 if (cur_ops->cleanup != NULL)
3410 cur_ops->cleanup();
3411 
3412 rcu_torture_mem_dump_obj();
3413 
3414 rcu_torture_stats_print(); /* -After- the stats thread is stopped!
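The rcu_torture_stats kthread (stopped just above via torture_stop_kthread(rcu_torture_stats, stats_task)) prints these same statistics periodically while the test runs, so doing this one last print only after that kthread is gone keeps it from interleaving with the periodic output and gives a single consistent end-of-test snapshot of the counters.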
*/ 3415 3416 if (err_segs_recorded) { 3417 pr_alert("Failure/close-call rcutorture reader segments:\n"); 3418 if (rt_read_nsegs == 0) 3419 pr_alert("\t: No segments recorded!!!\n"); 3420 firsttime = 1; 3421 for (i = 0; i < rt_read_nsegs; i++) { 3422 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 3423 if (err_segs[i].rt_delay_jiffies != 0) { 3424 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 3425 err_segs[i].rt_delay_jiffies); 3426 firsttime = 0; 3427 } 3428 if (err_segs[i].rt_delay_ms != 0) { 3429 pr_cont("%s%ldms", firsttime ? "" : "+", 3430 err_segs[i].rt_delay_ms); 3431 firsttime = 0; 3432 } 3433 if (err_segs[i].rt_delay_us != 0) { 3434 pr_cont("%s%ldus", firsttime ? "" : "+", 3435 err_segs[i].rt_delay_us); 3436 firsttime = 0; 3437 } 3438 pr_cont("%s\n", 3439 err_segs[i].rt_preempted ? "preempted" : ""); 3440 3441 } 3442 } 3443 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 3444 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 3445 else if (torture_onoff_failures()) 3446 rcu_torture_print_module_parms(cur_ops, 3447 "End of test: RCU_HOTPLUG"); 3448 else 3449 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 3450 torture_cleanup_end(); 3451 if (cur_ops->gp_slow_unregister) 3452 cur_ops->gp_slow_unregister(NULL); 3453 } 3454 3455 static void rcu_torture_leak_cb(struct rcu_head *rhp) 3456 { 3457 } 3458 3459 static void rcu_torture_err_cb(struct rcu_head *rhp) 3460 { 3461 /* 3462 * This -might- happen due to race conditions, but is unlikely. 3463 * The scenario that leads to this happening is that the 3464 * first of the pair of duplicate callbacks is queued, 3465 * someone else starts a grace period that includes that 3466 * callback, then the second of the pair must wait for the 3467 * next grace period. Unlikely, but can happen. If it 3468 * does happen, the debug-objects subsystem won't have splatted. 3469 */ 3470 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 3471 } 3472 3473 /* 3474 * Verify that double-free causes debug-objects to complain, but only 3475 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 3476 * cannot be carried out. 3477 */ 3478 static void rcu_test_debug_objects(void) 3479 { 3480 struct rcu_head rh1; 3481 struct rcu_head rh2; 3482 int idx; 3483 3484 if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) { 3485 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n", 3486 KBUILD_MODNAME, cur_ops->name); 3487 return; 3488 } 3489 3490 if (WARN_ON_ONCE(cur_ops->debug_objects && 3491 (!cur_ops->call || !cur_ops->cb_barrier))) 3492 return; 3493 3494 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 3495 3496 init_rcu_head_on_stack(&rh1); 3497 init_rcu_head_on_stack(&rh2); 3498 pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name); 3499 3500 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 3501 idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */ 3502 cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 3503 cur_ops->call(&rh2, rcu_torture_leak_cb); 3504 cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 3505 if (rhp) { 3506 cur_ops->call(rhp, rcu_torture_leak_cb); 3507 cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 3508 } 3509 cur_ops->readunlock(idx); 3510 3511 /* Wait for them all to get done so we can safely return. 
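In particular, rh1 and rh2 live on this function's stack and rhp is freed just below, so returning or calling kfree() before every queued callback has been invoked would leave the RCU machinery holding pointers into freed memory; the cur_ops->cb_barrier() call below is what provides that guarantee.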
*/ 3512 cur_ops->cb_barrier(); 3513 pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name); 3514 destroy_rcu_head_on_stack(&rh1); 3515 destroy_rcu_head_on_stack(&rh2); 3516 kfree(rhp); 3517 } 3518 3519 static void rcutorture_sync(void) 3520 { 3521 static unsigned long n; 3522 3523 if (cur_ops->sync && !(++n & 0xfff)) 3524 cur_ops->sync(); 3525 } 3526 3527 static DEFINE_MUTEX(mut0); 3528 static DEFINE_MUTEX(mut1); 3529 static DEFINE_MUTEX(mut2); 3530 static DEFINE_MUTEX(mut3); 3531 static DEFINE_MUTEX(mut4); 3532 static DEFINE_MUTEX(mut5); 3533 static DEFINE_MUTEX(mut6); 3534 static DEFINE_MUTEX(mut7); 3535 static DEFINE_MUTEX(mut8); 3536 static DEFINE_MUTEX(mut9); 3537 3538 static DECLARE_RWSEM(rwsem0); 3539 static DECLARE_RWSEM(rwsem1); 3540 static DECLARE_RWSEM(rwsem2); 3541 static DECLARE_RWSEM(rwsem3); 3542 static DECLARE_RWSEM(rwsem4); 3543 static DECLARE_RWSEM(rwsem5); 3544 static DECLARE_RWSEM(rwsem6); 3545 static DECLARE_RWSEM(rwsem7); 3546 static DECLARE_RWSEM(rwsem8); 3547 static DECLARE_RWSEM(rwsem9); 3548 3549 DEFINE_STATIC_SRCU(srcu0); 3550 DEFINE_STATIC_SRCU(srcu1); 3551 DEFINE_STATIC_SRCU(srcu2); 3552 DEFINE_STATIC_SRCU(srcu3); 3553 DEFINE_STATIC_SRCU(srcu4); 3554 DEFINE_STATIC_SRCU(srcu5); 3555 DEFINE_STATIC_SRCU(srcu6); 3556 DEFINE_STATIC_SRCU(srcu7); 3557 DEFINE_STATIC_SRCU(srcu8); 3558 DEFINE_STATIC_SRCU(srcu9); 3559 3560 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, 3561 int cyclelen, int deadlock) 3562 { 3563 int j = i + 1; 3564 3565 if (j >= cyclelen) 3566 j = deadlock ? 0 : -1; 3567 if (j >= 0) 3568 pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i); 3569 else 3570 pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i); 3571 return j; 3572 } 3573 3574 // Test lockdep on SRCU-based deadlock scenarios. 3575 static void rcu_torture_init_srcu_lockdep(void) 3576 { 3577 int cyclelen; 3578 int deadlock; 3579 bool err = false; 3580 int i; 3581 int j; 3582 int idx; 3583 struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, 3584 &mut5, &mut6, &mut7, &mut8, &mut9 }; 3585 struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, 3586 &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; 3587 struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, 3588 &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; 3589 int testtype; 3590 3591 if (!test_srcu_lockdep) 3592 return; 3593 3594 deadlock = test_srcu_lockdep / 1000; 3595 testtype = (test_srcu_lockdep / 10) % 100; 3596 cyclelen = test_srcu_lockdep % 10; 3597 WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); 3598 if (WARN_ONCE(deadlock != !!deadlock, 3599 "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n", 3600 __func__, test_srcu_lockdep, deadlock)) 3601 err = true; 3602 if (WARN_ONCE(cyclelen <= 0, 3603 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", 3604 __func__, test_srcu_lockdep, cyclelen)) 3605 err = true; 3606 if (err) 3607 goto err_out; 3608 3609 if (testtype == 0) { 3610 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", 3611 __func__, test_srcu_lockdep, cyclelen, deadlock ? 
"" : "non-"); 3612 if (deadlock && cyclelen == 1) 3613 pr_info("%s: Expect hang.\n", __func__); 3614 for (i = 0; i < cyclelen; i++) { 3615 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 3616 "srcu_read_unlock", i, cyclelen, deadlock); 3617 idx = srcu_read_lock(srcus[i]); 3618 if (j >= 0) 3619 synchronize_srcu(srcus[j]); 3620 srcu_read_unlock(srcus[i], idx); 3621 } 3622 return; 3623 } 3624 3625 if (testtype == 1) { 3626 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 3627 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3628 for (i = 0; i < cyclelen; i++) { 3629 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 3630 __func__, i, i, i, i); 3631 idx = srcu_read_lock(srcus[i]); 3632 mutex_lock(muts[i]); 3633 mutex_unlock(muts[i]); 3634 srcu_read_unlock(srcus[i], idx); 3635 3636 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 3637 "mutex_unlock", i, cyclelen, deadlock); 3638 mutex_lock(muts[i]); 3639 if (j >= 0) 3640 synchronize_srcu(srcus[j]); 3641 mutex_unlock(muts[i]); 3642 } 3643 return; 3644 } 3645 3646 if (testtype == 2) { 3647 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 3648 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3649 for (i = 0; i < cyclelen; i++) { 3650 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 3651 __func__, i, i, i, i); 3652 idx = srcu_read_lock(srcus[i]); 3653 down_read(rwsems[i]); 3654 up_read(rwsems[i]); 3655 srcu_read_unlock(srcus[i], idx); 3656 3657 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 3658 "up_write", i, cyclelen, deadlock); 3659 down_write(rwsems[i]); 3660 if (j >= 0) 3661 synchronize_srcu(srcus[j]); 3662 up_write(rwsems[i]); 3663 } 3664 return; 3665 } 3666 3667 #ifdef CONFIG_TASKS_TRACE_RCU 3668 if (testtype == 3) { 3669 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 3670 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3671 if (deadlock && cyclelen == 1) 3672 pr_info("%s: Expect hang.\n", __func__); 3673 for (i = 0; i < cyclelen; i++) { 3674 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 3675 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 3676 : "synchronize_srcu"; 3677 char *fu = i == 0 ? 
"rcu_read_unlock_trace" : "srcu_read_unlock"; 3678 3679 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 3680 if (i == 0) 3681 rcu_read_lock_trace(); 3682 else 3683 idx = srcu_read_lock(srcus[i]); 3684 if (j >= 0) { 3685 if (i == cyclelen - 1) 3686 synchronize_rcu_tasks_trace(); 3687 else 3688 synchronize_srcu(srcus[j]); 3689 } 3690 if (i == 0) 3691 rcu_read_unlock_trace(); 3692 else 3693 srcu_read_unlock(srcus[i], idx); 3694 } 3695 return; 3696 } 3697 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 3698 3699 err_out: 3700 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 3701 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 3702 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 3703 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 3704 pr_info("%s: L: Cycle length.\n", __func__); 3705 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 3706 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 3707 } 3708 3709 static int __init 3710 rcu_torture_init(void) 3711 { 3712 long i; 3713 int cpu; 3714 int firsterr = 0; 3715 int flags = 0; 3716 unsigned long gp_seq = 0; 3717 static struct rcu_torture_ops *torture_ops[] = { 3718 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 3719 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 3720 &trivial_ops, 3721 }; 3722 3723 if (!torture_init_begin(torture_type, verbose)) 3724 return -EBUSY; 3725 3726 /* Process args and tell the world that the torturer is on the job. */ 3727 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 3728 cur_ops = torture_ops[i]; 3729 if (strcmp(torture_type, cur_ops->name) == 0) 3730 break; 3731 } 3732 if (i == ARRAY_SIZE(torture_ops)) { 3733 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 3734 torture_type); 3735 pr_alert("rcu-torture types:"); 3736 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 3737 pr_cont(" %s", torture_ops[i]->name); 3738 pr_cont("\n"); 3739 firsterr = -EINVAL; 3740 cur_ops = NULL; 3741 goto unwind; 3742 } 3743 if (cur_ops->fqs == NULL && fqs_duration != 0) { 3744 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 3745 fqs_duration = 0; 3746 } 3747 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 3748 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3749 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 3750 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 3751 nocbs_nthreads = 0; 3752 } 3753 if (cur_ops->init) 3754 cur_ops->init(); 3755 3756 rcu_torture_init_srcu_lockdep(); 3757 3758 if (nreaders >= 0) { 3759 nrealreaders = nreaders; 3760 } else { 3761 nrealreaders = num_online_cpus() - 2 - nreaders; 3762 if (nrealreaders <= 0) 3763 nrealreaders = 1; 3764 } 3765 rcu_torture_print_module_parms(cur_ops, "Start of test"); 3766 if (cur_ops->get_gp_data) 3767 cur_ops->get_gp_data(&flags, &gp_seq); 3768 start_gp_seq = gp_seq; 3769 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 3770 cur_ops->name, (long)gp_seq, flags); 3771 3772 /* Set up the freelist. */ 3773 3774 INIT_LIST_HEAD(&rcu_torture_freelist); 3775 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 3776 rcu_tortures[i].rtort_mbtest = 0; 3777 list_add_tail(&rcu_tortures[i].rtort_free, 3778 &rcu_torture_freelist); 3779 } 3780 3781 /* Initialize the statistics so that each run gets its own numbers. 
*/ 3782 3783 rcu_torture_current = NULL; 3784 rcu_torture_current_version = 0; 3785 atomic_set(&n_rcu_torture_alloc, 0); 3786 atomic_set(&n_rcu_torture_alloc_fail, 0); 3787 atomic_set(&n_rcu_torture_free, 0); 3788 atomic_set(&n_rcu_torture_mberror, 0); 3789 atomic_set(&n_rcu_torture_mbchk_fail, 0); 3790 atomic_set(&n_rcu_torture_mbchk_tries, 0); 3791 atomic_set(&n_rcu_torture_error, 0); 3792 n_rcu_torture_barrier_error = 0; 3793 n_rcu_torture_boost_ktrerror = 0; 3794 n_rcu_torture_boost_failure = 0; 3795 n_rcu_torture_boosts = 0; 3796 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 3797 atomic_set(&rcu_torture_wcount[i], 0); 3798 for_each_possible_cpu(cpu) { 3799 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 3800 per_cpu(rcu_torture_count, cpu)[i] = 0; 3801 per_cpu(rcu_torture_batch, cpu)[i] = 0; 3802 } 3803 } 3804 err_segs_recorded = 0; 3805 rt_read_nsegs = 0; 3806 3807 /* Start up the kthreads. */ 3808 3809 rcu_torture_write_types(); 3810 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 3811 writer_task); 3812 if (torture_init_error(firsterr)) 3813 goto unwind; 3814 if (nfakewriters > 0) { 3815 fakewriter_tasks = kcalloc(nfakewriters, 3816 sizeof(fakewriter_tasks[0]), 3817 GFP_KERNEL); 3818 if (fakewriter_tasks == NULL) { 3819 TOROUT_ERRSTRING("out of memory"); 3820 firsterr = -ENOMEM; 3821 goto unwind; 3822 } 3823 } 3824 for (i = 0; i < nfakewriters; i++) { 3825 firsterr = torture_create_kthread(rcu_torture_fakewriter, 3826 NULL, fakewriter_tasks[i]); 3827 if (torture_init_error(firsterr)) 3828 goto unwind; 3829 } 3830 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 3831 GFP_KERNEL); 3832 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 3833 GFP_KERNEL); 3834 if (!reader_tasks || !rcu_torture_reader_mbchk) { 3835 TOROUT_ERRSTRING("out of memory"); 3836 firsterr = -ENOMEM; 3837 goto unwind; 3838 } 3839 for (i = 0; i < nrealreaders; i++) { 3840 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 3841 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 3842 reader_tasks[i]); 3843 if (torture_init_error(firsterr)) 3844 goto unwind; 3845 } 3846 nrealnocbers = nocbs_nthreads; 3847 if (WARN_ON(nrealnocbers < 0)) 3848 nrealnocbers = 1; 3849 if (WARN_ON(nocbs_toggle < 0)) 3850 nocbs_toggle = HZ; 3851 if (nrealnocbers > 0) { 3852 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 3853 if (nocb_tasks == NULL) { 3854 TOROUT_ERRSTRING("out of memory"); 3855 firsterr = -ENOMEM; 3856 goto unwind; 3857 } 3858 } else { 3859 nocb_tasks = NULL; 3860 } 3861 for (i = 0; i < nrealnocbers; i++) { 3862 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 3863 if (torture_init_error(firsterr)) 3864 goto unwind; 3865 } 3866 if (stat_interval > 0) { 3867 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 3868 stats_task); 3869 if (torture_init_error(firsterr)) 3870 goto unwind; 3871 } 3872 if (test_no_idle_hz && shuffle_interval > 0) { 3873 firsterr = torture_shuffle_init(shuffle_interval * HZ); 3874 if (torture_init_error(firsterr)) 3875 goto unwind; 3876 } 3877 if (stutter < 0) 3878 stutter = 0; 3879 if (stutter) { 3880 int t; 3881 3882 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 3883 firsterr = torture_stutter_init(stutter * HZ, t); 3884 if (torture_init_error(firsterr)) 3885 goto unwind; 3886 } 3887 if (fqs_duration < 0) 3888 fqs_duration = 0; 3889 if (fqs_holdoff < 0) 3890 fqs_holdoff = 0; 3891 if (fqs_duration && fqs_holdoff) { 3892 /* Create the fqs thread */ 3893 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 3894 fqs_task); 3895 if (torture_init_error(firsterr)) 3896 goto unwind; 3897 } 3898 if (test_boost_interval < 1) 3899 test_boost_interval = 1; 3900 if (test_boost_duration < 2) 3901 test_boost_duration = 2; 3902 if (rcu_torture_can_boost()) { 3903 3904 boost_starttime = jiffies + test_boost_interval * HZ; 3905 3906 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 3907 rcutorture_booster_init, 3908 rcutorture_booster_cleanup); 3909 rcutor_hp = firsterr; 3910 if (torture_init_error(firsterr)) 3911 goto unwind; 3912 } 3913 shutdown_jiffies = jiffies + shutdown_secs * HZ; 3914 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 3915 if (torture_init_error(firsterr)) 3916 goto unwind; 3917 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 3918 rcutorture_sync); 3919 if (torture_init_error(firsterr)) 3920 goto unwind; 3921 firsterr = rcu_torture_stall_init(); 3922 if (torture_init_error(firsterr)) 3923 goto unwind; 3924 firsterr = rcu_torture_fwd_prog_init(); 3925 if (torture_init_error(firsterr)) 3926 goto unwind; 3927 firsterr = rcu_torture_barrier_init(); 3928 if (torture_init_error(firsterr)) 3929 goto unwind; 3930 firsterr = rcu_torture_read_exit_init(); 3931 if (torture_init_error(firsterr)) 3932 goto unwind; 3933 if (object_debug) 3934 rcu_test_debug_objects(); 3935 torture_init_end(); 3936 if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) 3937 cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); 3938 return 0; 3939 3940 unwind: 3941 torture_init_end(); 3942 rcu_torture_cleanup(); 3943 if (shutdown_secs) { 3944 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 3945 kernel_power_off(); 3946 } 3947 return firsterr; 3948 } 3949 3950 module_init(rcu_torture_init); 3951 module_exit(rcu_torture_cleanup); 3952
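/*
 * Illustrative note, not used by the test itself: the test_srcu_lockdep
 * value decoded in rcu_torture_init_srcu_lockdep() packs its digits as
 * DNNL (deadlock digit, two-digit test number, cycle length).  As a
 * sketch of how that works out in practice, assuming rcutorture is built
 * as a module, an invocation along the lines of
 *
 *	modprobe rcutorture torture_type=srcud test_srcu_lockdep=1013
 *
 * asks for a deliberate deadlock cycle (D=1) in the SRCU/mutex scenario
 * (NN=01) with a cycle length of three (L=3), while test_srcu_lockdep=0023
 * selects the SRCU/rwsem scenario (NN=02) with a three-lock cycle and no
 * deadlock (D=0).  The specific values above are examples only; any value
 * is decoded by the same D/NN/L rules, and combinations the function does
 * not recognize fall through to the err_out help text that it prints.
 */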