// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	(1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	(1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
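/*
 * The extendables module parameter below is a bitmask built from the
 * RCUTORTURE_RDR_* bits above.  For example (illustrative only),
 * specifying extendables=0x5 allows readers to be extended by disabling
 * bh (0x1) and preemption (0x4), but not by disabling interrupts.  Like
 * the other parameters below, it can be supplied at modprobe time or,
 * for a built-in test, on the kernel command line with the "rcutorture."
 * prefix.
 */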
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_gp_completed)(void);
	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
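/*
 * cur_ops is selected at initialization time by matching the torture_type
 * module parameter against the ->name fields of the rcu_torture_ops
 * structures defined below.
 */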
/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
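/*
 * Each grace period advances a removed element one stage through the
 * pipeline above.  Readers histogram the rtort_pipe_count values they
 * observe, so nonzero counts beyond the first couple of buckets indicate
 * that a reader held a reference across more than one grace period, that
 * is, that a grace period ended too soon.
 */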
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.get_gp_completed = get_completed_synchronize_rcu,
	.get_gp_completed_full = get_completed_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcu"
};
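/*
 * The init/cleanup pair below switches srcu_ctlp from the statically
 * allocated srcu_ctl to the dynamically initialized srcu_ctld, which is
 * what the "srcud" and "busted_srcud" flavors exercise.
 */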
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_rude_torture_deferred_free,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.call = call_rcu_tasks_rude,
	.cb_barrier = rcu_barrier_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.cbflood_max = 50000,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay, /* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.cbflood_max = 50000,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
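/*
 * For example (illustrative only), booting with rcutorture.gp_sync=1 and
 * all other gp_* parameters clear leaves synctype[] holding only
 * RTWS_SYNC, so the writer and fake-writer kthreads then use
 * cur_ops->sync() for their grace-period waits.
 */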
1298 */ 1299 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) 1300 { 1301 unsigned long cookie; 1302 struct rcu_gp_oldstate cookie_full; 1303 bool dopoll; 1304 bool dopoll_full; 1305 unsigned long r = torture_random(trsp); 1306 1307 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); 1308 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); 1309 if (dopoll || dopoll_full) 1310 cpus_read_lock(); 1311 if (dopoll) 1312 cookie = cur_ops->get_gp_state(); 1313 if (dopoll_full) 1314 cur_ops->get_gp_state_full(&cookie_full); 1315 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) 1316 sync(); 1317 sync(); 1318 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), 1319 "%s: Cookie check 3 failed %pS() online %*pbl.", 1320 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1321 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), 1322 "%s: Cookie check 4 failed %pS() online %*pbl", 1323 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1324 if (dopoll || dopoll_full) 1325 cpus_read_unlock(); 1326 } 1327 1328 /* 1329 * RCU torture writer kthread. Repeatedly substitutes a new structure 1330 * for that pointed to by rcu_torture_current, freeing the old structure 1331 * after a series of grace periods (the "pipeline"). 1332 */ 1333 static int 1334 rcu_torture_writer(void *arg) 1335 { 1336 bool boot_ended; 1337 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1338 unsigned long cookie; 1339 struct rcu_gp_oldstate cookie_full; 1340 int expediting = 0; 1341 unsigned long gp_snap; 1342 struct rcu_gp_oldstate gp_snap_full; 1343 int i; 1344 int idx; 1345 int oldnice = task_nice(current); 1346 struct rcu_torture *rp; 1347 struct rcu_torture *old_rp; 1348 static DEFINE_TORTURE_RANDOM(rand); 1349 bool stutter_waited; 1350 1351 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1352 if (!can_expedite) 1353 pr_alert("%s" TORTURE_FLAG 1354 " GP expediting controlled from boot/sysfs for %s.\n", 1355 torture_type, cur_ops->name); 1356 if (WARN_ONCE(nsynctypes == 0, 1357 "%s: No update-side primitives.\n", __func__)) { 1358 /* 1359 * No updates primitives, so don't try updating. 1360 * The resulting test won't be testing much, hence the 1361 * above WARN_ONCE(). 1362 */ 1363 rcu_torture_writer_state = RTWS_STOPPING; 1364 torture_kthread_stopping("rcu_torture_writer"); 1365 return 0; 1366 } 1367 1368 do { 1369 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1370 torture_hrtimeout_us(500, 1000, &rand); 1371 rp = rcu_torture_alloc(); 1372 if (rp == NULL) 1373 continue; 1374 rp->rtort_pipe_count = 0; 1375 rcu_torture_writer_state = RTWS_DELAY; 1376 udelay(torture_random(&rand) & 0x3ff); 1377 rcu_torture_writer_state = RTWS_REPLACE; 1378 old_rp = rcu_dereference_check(rcu_torture_current, 1379 current == writer_task); 1380 rp->rtort_mbtest = 1; 1381 rcu_assign_pointer(rcu_torture_current, rp); 1382 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1383 if (old_rp) { 1384 i = old_rp->rtort_pipe_count; 1385 if (i > RCU_TORTURE_PIPE_LEN) 1386 i = RCU_TORTURE_PIPE_LEN; 1387 atomic_inc(&rcu_torture_wcount[i]); 1388 WRITE_ONCE(old_rp->rtort_pipe_count, 1389 old_rp->rtort_pipe_count + 1); 1390 1391 // Make sure readers block polled grace periods. 
1392 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1393 idx = cur_ops->readlock(); 1394 cookie = cur_ops->get_gp_state(); 1395 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1396 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1397 __func__, 1398 rcu_torture_writer_state_getname(), 1399 rcu_torture_writer_state, 1400 cookie, cur_ops->get_gp_state()); 1401 if (cur_ops->get_gp_completed) { 1402 cookie = cur_ops->get_gp_completed(); 1403 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1404 } 1405 cur_ops->readunlock(idx); 1406 } 1407 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1408 idx = cur_ops->readlock(); 1409 cur_ops->get_gp_state_full(&cookie_full); 1410 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1411 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1412 __func__, 1413 rcu_torture_writer_state_getname(), 1414 rcu_torture_writer_state, 1415 cpumask_pr_args(cpu_online_mask)); 1416 if (cur_ops->get_gp_completed_full) { 1417 cur_ops->get_gp_completed_full(&cookie_full); 1418 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1419 } 1420 cur_ops->readunlock(idx); 1421 } 1422 switch (synctype[torture_random(&rand) % nsynctypes]) { 1423 case RTWS_DEF_FREE: 1424 rcu_torture_writer_state = RTWS_DEF_FREE; 1425 cur_ops->deferred_free(old_rp); 1426 break; 1427 case RTWS_EXP_SYNC: 1428 rcu_torture_writer_state = RTWS_EXP_SYNC; 1429 do_rtws_sync(&rand, cur_ops->exp_sync); 1430 rcu_torture_pipe_update(old_rp); 1431 break; 1432 case RTWS_COND_GET: 1433 rcu_torture_writer_state = RTWS_COND_GET; 1434 gp_snap = cur_ops->get_gp_state(); 1435 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1436 rcu_torture_writer_state = RTWS_COND_SYNC; 1437 cur_ops->cond_sync(gp_snap); 1438 rcu_torture_pipe_update(old_rp); 1439 break; 1440 case RTWS_COND_GET_EXP: 1441 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1442 gp_snap = cur_ops->get_gp_state_exp(); 1443 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1444 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1445 cur_ops->cond_sync_exp(gp_snap); 1446 rcu_torture_pipe_update(old_rp); 1447 break; 1448 case RTWS_COND_GET_FULL: 1449 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1450 cur_ops->get_gp_state_full(&gp_snap_full); 1451 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1452 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1453 cur_ops->cond_sync_full(&gp_snap_full); 1454 rcu_torture_pipe_update(old_rp); 1455 break; 1456 case RTWS_COND_GET_EXP_FULL: 1457 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1458 cur_ops->get_gp_state_full(&gp_snap_full); 1459 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1460 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1461 cur_ops->cond_sync_exp_full(&gp_snap_full); 1462 rcu_torture_pipe_update(old_rp); 1463 break; 1464 case RTWS_POLL_GET: 1465 rcu_torture_writer_state = RTWS_POLL_GET; 1466 gp_snap = cur_ops->start_gp_poll(); 1467 rcu_torture_writer_state = RTWS_POLL_WAIT; 1468 while (!cur_ops->poll_gp_state(gp_snap)) 1469 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1470 &rand); 1471 rcu_torture_pipe_update(old_rp); 1472 break; 1473 case RTWS_POLL_GET_FULL: 1474 rcu_torture_writer_state = RTWS_POLL_GET_FULL; 1475 cur_ops->start_gp_poll_full(&gp_snap_full); 1476 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1477 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1478 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1479 &rand); 1480 rcu_torture_pipe_update(old_rp); 1481 break; 1482 case RTWS_POLL_GET_EXP: 1483 
				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
				gp_snap = cur_ops->start_gp_poll_exp();
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
				while (!cur_ops->poll_gp_state_exp(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				do_rtws_sync(&rand, cur_ops->sync);
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !atomic_read(&rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					tracing_off();
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
					rcu_ftrace_dump(DUMP_ALL);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	struct rcu_gp_oldstate gp_snap_full;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
1581 */ 1582 torture_kthread_stopping("rcu_torture_fakewriter"); 1583 return 0; 1584 } 1585 1586 do { 1587 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1588 if (cur_ops->cb_barrier != NULL && 1589 torture_random(&rand) % (nfakewriters * 8) == 0) { 1590 cur_ops->cb_barrier(); 1591 } else { 1592 switch (synctype[torture_random(&rand) % nsynctypes]) { 1593 case RTWS_DEF_FREE: 1594 break; 1595 case RTWS_EXP_SYNC: 1596 cur_ops->exp_sync(); 1597 break; 1598 case RTWS_COND_GET: 1599 gp_snap = cur_ops->get_gp_state(); 1600 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1601 cur_ops->cond_sync(gp_snap); 1602 break; 1603 case RTWS_COND_GET_EXP: 1604 gp_snap = cur_ops->get_gp_state_exp(); 1605 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1606 cur_ops->cond_sync_exp(gp_snap); 1607 break; 1608 case RTWS_COND_GET_FULL: 1609 cur_ops->get_gp_state_full(&gp_snap_full); 1610 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1611 cur_ops->cond_sync_full(&gp_snap_full); 1612 break; 1613 case RTWS_COND_GET_EXP_FULL: 1614 cur_ops->get_gp_state_full(&gp_snap_full); 1615 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1616 cur_ops->cond_sync_exp_full(&gp_snap_full); 1617 break; 1618 case RTWS_POLL_GET: 1619 gp_snap = cur_ops->start_gp_poll(); 1620 while (!cur_ops->poll_gp_state(gp_snap)) { 1621 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1622 &rand); 1623 } 1624 break; 1625 case RTWS_POLL_GET_FULL: 1626 cur_ops->start_gp_poll_full(&gp_snap_full); 1627 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1628 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1629 &rand); 1630 } 1631 break; 1632 case RTWS_POLL_GET_EXP: 1633 gp_snap = cur_ops->start_gp_poll_exp(); 1634 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1635 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1636 &rand); 1637 } 1638 break; 1639 case RTWS_POLL_GET_EXP_FULL: 1640 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1641 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1642 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1643 &rand); 1644 } 1645 break; 1646 case RTWS_SYNC: 1647 cur_ops->sync(); 1648 break; 1649 default: 1650 WARN_ON_ONCE(1); 1651 break; 1652 } 1653 } 1654 stutter_wait("rcu_torture_fakewriter"); 1655 } while (!torture_must_stop()); 1656 1657 torture_kthread_stopping("rcu_torture_fakewriter"); 1658 return 0; 1659 } 1660 1661 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1662 { 1663 kfree(rhp); 1664 } 1665 1666 // Set up and carry out testing of RCU's global memory ordering 1667 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1668 struct torture_random_state *trsp) 1669 { 1670 unsigned long loops; 1671 int noc = torture_num_online_cpus(); 1672 int rdrchked; 1673 int rdrchker; 1674 struct rcu_torture_reader_check *rtrcp; // Me. 1675 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1676 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1677 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1678 1679 if (myid < 0) 1680 return; // Don't try this from timer handlers. 1681 1682 // Increment my counter. 1683 rtrcp = &rcu_torture_reader_mbchk[myid]; 1684 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1685 1686 // Attempt to assign someone else some checking work. 
1687	rdrchked = torture_random(trsp) % nrealreaders;
1688	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1689	rdrchker = torture_random(trsp) % nrealreaders;
1690	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1691	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1692	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1693	    !READ_ONCE(rtp->rtort_chkp) &&
1694	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1695		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1696		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1697		rtrcp->rtc_chkrdr = rdrchked;
1698		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1699		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1700		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1701			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1702	}
1703
1704	// If assigned some completed work, do it!
1705	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1706	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1707		return; // No work or work not yet ready.
1708	rdrchked = rtrcp_assigner->rtc_chkrdr;
1709	if (WARN_ON_ONCE(rdrchked < 0))
1710		return;
1711	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1712	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1713	atomic_inc(&n_rcu_torture_mbchk_tries);
1714	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1715		atomic_inc(&n_rcu_torture_mbchk_fail);
1716	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1717	rtrcp_assigner->rtc_ready = 0;
1718	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1719	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1720 }
1721
1722 /*
1723  * Do one extension of an RCU read-side critical section using the
1724  * current reader state in readstate (set to zero for initial entry
1725  * to extended critical section), set the new state as specified by
1726  * newstate (set to zero for final exit from extended critical section),
1727  * and random-number-generator state in trsp. If this is neither the
1728  * beginning nor the end of the critical section and if there was actually
1729  * a change, do a ->read_delay().
1730  */
1731 static void rcutorture_one_extend(int *readstate, int newstate,
1732				   struct torture_random_state *trsp,
1733				   struct rt_read_seg *rtrsp)
1734 {
1735	unsigned long flags;
1736	int idxnew1 = -1;
1737	int idxnew2 = -1;
1738	int idxold1 = *readstate;
1739	int idxold2 = idxold1;
1740	int statesnew = ~*readstate & newstate;
1741	int statesold = *readstate & ~newstate;
1742
1743	WARN_ON_ONCE(idxold2 < 0);
1744	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1745	rtrsp->rt_readstate = newstate;
1746
1747	/* First, put new protection in place to avoid critical-section gap.
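	 * For example, when a segment protected by rcu_read_lock_sched() is
	 * followed by one protected only by preempt_disable(), the new
	 * preempt_disable() below runs before the old rcu_read_unlock_sched(),
	 * so there is never a window in which the reader holds neither form
	 * of protection.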
	 */
1748	if (statesnew & RCUTORTURE_RDR_BH)
1749		local_bh_disable();
1750	if (statesnew & RCUTORTURE_RDR_RBH)
1751		rcu_read_lock_bh();
1752	if (statesnew & RCUTORTURE_RDR_IRQ)
1753		local_irq_disable();
1754	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1755		preempt_disable();
1756	if (statesnew & RCUTORTURE_RDR_SCHED)
1757		rcu_read_lock_sched();
1758	if (statesnew & RCUTORTURE_RDR_RCU_1)
1759		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1760	if (statesnew & RCUTORTURE_RDR_RCU_2)
1761		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
1762
1763	/*
1764	 * Next, remove old protection, in decreasing order of strength
1765	 * to avoid unlock paths that aren't safe in the stronger
1766	 * context. Namely: BH cannot be enabled with interrupts disabled.
1767	 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
1768	 * context.
1769	 */
1770	if (statesold & RCUTORTURE_RDR_IRQ)
1771		local_irq_enable();
1772	if (statesold & RCUTORTURE_RDR_PREEMPT)
1773		preempt_enable();
1774	if (statesold & RCUTORTURE_RDR_SCHED)
1775		rcu_read_unlock_sched();
1776	if (statesold & RCUTORTURE_RDR_BH)
1777		local_bh_enable();
1778	if (statesold & RCUTORTURE_RDR_RBH)
1779		rcu_read_unlock_bh();
1780	if (statesold & RCUTORTURE_RDR_RCU_2) {
1781		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1782		WARN_ON_ONCE(idxnew2 != -1);
1783		idxold2 = 0;
1784	}
1785	if (statesold & RCUTORTURE_RDR_RCU_1) {
1786		bool lockit;
1787
1788		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1789		if (lockit)
1790			raw_spin_lock_irqsave(&current->pi_lock, flags);
1791		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1792		WARN_ON_ONCE(idxnew1 != -1);
1793		idxold1 = 0;
1794		if (lockit)
1795			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1796	}
1797
1798	/* Delay if neither beginning nor end and there was a change. */
1799	if ((statesnew || statesold) && *readstate && newstate)
1800		cur_ops->read_delay(trsp, rtrsp);
1801
1802	/* Update the reader state. */
1803	if (idxnew1 == -1)
1804		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1805	WARN_ON_ONCE(idxnew1 < 0);
1806	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1807		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1808	if (idxnew2 == -1)
1809		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1810	WARN_ON_ONCE(idxnew2 < 0);
1811	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1812	*readstate = idxnew1 | idxnew2 | newstate;
1813	WARN_ON_ONCE(*readstate < 0);
1814	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1815		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
1816 }
1817
1818 /* Return the biggest extendables mask given current RCU and boot parameters. */
1819 static int rcutorture_extend_mask_max(void)
1820 {
1821	int mask;
1822
1823	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1824	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1825	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1826	return mask;
1827 }
1828
1829 /* Return a random protection state mask, but with at least one bit set.
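 * The returned mask is a combination of the RCUTORTURE_RDR_* bits consumed
 * by rcutorture_one_extend() above; for example, RCUTORTURE_RDR_RBH |
 * RCUTORTURE_RDR_RCU_1 requests a segment protected by both
 * rcu_read_lock_bh() and cur_ops->readlock().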
*/ 1830 static int 1831 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1832 { 1833 int mask = rcutorture_extend_mask_max(); 1834 unsigned long randmask1 = torture_random(trsp) >> 8; 1835 unsigned long randmask2 = randmask1 >> 3; 1836 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 1837 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 1838 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1839 1840 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); 1841 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1842 if (!(randmask1 & 0x7)) 1843 mask = mask & randmask2; 1844 else 1845 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1846 1847 // Can't have nested RCU reader without outer RCU reader. 1848 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 1849 if (oldmask & RCUTORTURE_RDR_RCU_1) 1850 mask &= ~RCUTORTURE_RDR_RCU_2; 1851 else 1852 mask |= RCUTORTURE_RDR_RCU_1; 1853 } 1854 1855 /* 1856 * Can't enable bh w/irq disabled. 1857 */ 1858 if (mask & RCUTORTURE_RDR_IRQ) 1859 mask |= oldmask & bhs; 1860 1861 /* 1862 * Ideally these sequences would be detected in debug builds 1863 * (regardless of RT), but until then don't stop testing 1864 * them on non-RT. 1865 */ 1866 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1867 /* Can't modify BH in atomic context */ 1868 if (oldmask & preempts_irq) 1869 mask &= ~bhs; 1870 if ((oldmask | mask) & preempts_irq) 1871 mask |= oldmask & bhs; 1872 } 1873 1874 return mask ?: RCUTORTURE_RDR_RCU_1; 1875 } 1876 1877 /* 1878 * Do a randomly selected number of extensions of an existing RCU read-side 1879 * critical section. 1880 */ 1881 static struct rt_read_seg * 1882 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1883 struct rt_read_seg *rtrsp) 1884 { 1885 int i; 1886 int j; 1887 int mask = rcutorture_extend_mask_max(); 1888 1889 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1890 if (!((mask - 1) & mask)) 1891 return rtrsp; /* Current RCU reader not extendable. */ 1892 /* Bias towards larger numbers of loops. */ 1893 i = (torture_random(trsp) >> 3); 1894 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1895 for (j = 0; j < i; j++) { 1896 mask = rcutorture_extend_mask(*readstate, trsp); 1897 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1898 } 1899 return &rtrsp[j]; 1900 } 1901 1902 /* 1903 * Do one read-side critical section, returning false if there was 1904 * no data to read. Can be invoked both from process context and 1905 * from a timer handler. 
1906 */ 1907 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1908 { 1909 bool checkpolling = !(torture_random(trsp) & 0xfff); 1910 unsigned long cookie; 1911 struct rcu_gp_oldstate cookie_full; 1912 int i; 1913 unsigned long started; 1914 unsigned long completed; 1915 int newstate; 1916 struct rcu_torture *p; 1917 int pipe_count; 1918 int readstate = 0; 1919 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1920 struct rt_read_seg *rtrsp = &rtseg[0]; 1921 struct rt_read_seg *rtrsp1; 1922 unsigned long long ts; 1923 1924 WARN_ON_ONCE(!rcu_is_watching()); 1925 newstate = rcutorture_extend_mask(readstate, trsp); 1926 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1927 if (checkpolling) { 1928 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1929 cookie = cur_ops->get_gp_state(); 1930 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 1931 cur_ops->get_gp_state_full(&cookie_full); 1932 } 1933 started = cur_ops->get_gp_seq(); 1934 ts = rcu_trace_clock_local(); 1935 p = rcu_dereference_check(rcu_torture_current, 1936 !cur_ops->readlock_held || cur_ops->readlock_held()); 1937 if (p == NULL) { 1938 /* Wait for rcu_torture_writer to get underway */ 1939 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1940 return false; 1941 } 1942 if (p->rtort_mbtest == 0) 1943 atomic_inc(&n_rcu_torture_mberror); 1944 rcu_torture_reader_do_mbchk(myid, p, trsp); 1945 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1946 preempt_disable(); 1947 pipe_count = READ_ONCE(p->rtort_pipe_count); 1948 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1949 /* Should not happen, but... */ 1950 pipe_count = RCU_TORTURE_PIPE_LEN; 1951 } 1952 completed = cur_ops->get_gp_seq(); 1953 if (pipe_count > 1) { 1954 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1955 ts, started, completed); 1956 rcu_ftrace_dump(DUMP_ALL); 1957 } 1958 __this_cpu_inc(rcu_torture_count[pipe_count]); 1959 completed = rcutorture_seq_diff(completed, started); 1960 if (completed > RCU_TORTURE_PIPE_LEN) { 1961 /* Should not happen, but... */ 1962 completed = RCU_TORTURE_PIPE_LEN; 1963 } 1964 __this_cpu_inc(rcu_torture_batch[completed]); 1965 preempt_enable(); 1966 if (checkpolling) { 1967 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1968 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1969 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 1970 __func__, 1971 rcu_torture_writer_state_getname(), 1972 rcu_torture_writer_state, 1973 cookie, cur_ops->get_gp_state()); 1974 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 1975 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1976 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 1977 __func__, 1978 rcu_torture_writer_state_getname(), 1979 rcu_torture_writer_state, 1980 cpumask_pr_args(cpu_online_mask)); 1981 } 1982 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1983 WARN_ON_ONCE(readstate); 1984 // This next splat is expected behavior if leakpointer, especially 1985 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 1986 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 1987 1988 /* If error or close call, record the sequence of reader protections. 
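	 * The err_segs[] array captured here is dumped at the end of the run
	 * by rcu_torture_cleanup(), which prints each segment's protection
	 * state and any delays applied within it.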
*/ 1989 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 1990 i = 0; 1991 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 1992 err_segs[i++] = *rtrsp1; 1993 rt_read_nsegs = i; 1994 } 1995 1996 return true; 1997 } 1998 1999 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2000 2001 /* 2002 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2003 * incrementing the corresponding element of the pipeline array. The 2004 * counter in the element should never be greater than 1, otherwise, the 2005 * RCU implementation is broken. 2006 */ 2007 static void rcu_torture_timer(struct timer_list *unused) 2008 { 2009 atomic_long_inc(&n_rcu_torture_timers); 2010 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2011 2012 /* Test call_rcu() invocation from interrupt handler. */ 2013 if (cur_ops->call) { 2014 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2015 2016 if (rhp) 2017 cur_ops->call(rhp, rcu_torture_timer_cb); 2018 } 2019 } 2020 2021 /* 2022 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2023 * incrementing the corresponding element of the pipeline array. The 2024 * counter in the element should never be greater than 1, otherwise, the 2025 * RCU implementation is broken. 2026 */ 2027 static int 2028 rcu_torture_reader(void *arg) 2029 { 2030 unsigned long lastsleep = jiffies; 2031 long myid = (long)arg; 2032 int mynumonline = myid; 2033 DEFINE_TORTURE_RANDOM(rand); 2034 struct timer_list t; 2035 2036 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2037 set_user_nice(current, MAX_NICE); 2038 if (irqreader && cur_ops->irq_capable) 2039 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2040 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2041 do { 2042 if (irqreader && cur_ops->irq_capable) { 2043 if (!timer_pending(&t)) 2044 mod_timer(&t, jiffies + 1); 2045 } 2046 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2047 schedule_timeout_interruptible(HZ); 2048 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2049 torture_hrtimeout_us(500, 1000, &rand); 2050 lastsleep = jiffies + 10; 2051 } 2052 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2053 schedule_timeout_interruptible(HZ / 5); 2054 stutter_wait("rcu_torture_reader"); 2055 } while (!torture_must_stop()); 2056 if (irqreader && cur_ops->irq_capable) { 2057 del_timer_sync(&t); 2058 destroy_timer_on_stack(&t); 2059 } 2060 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2061 torture_kthread_stopping("rcu_torture_reader"); 2062 return 0; 2063 } 2064 2065 /* 2066 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2067 * increase race probabilities and fuzzes the interval between toggling. 
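 * Each delay is the nocbs_toggle interval plus up to toggle_fuzz nanoseconds
 * of random slack, where toggle_fuzz is one eighth of that interval but no
 * less than one microsecond.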
2068 */ 2069 static int rcu_nocb_toggle(void *arg) 2070 { 2071 int cpu; 2072 int maxcpu = -1; 2073 int oldnice = task_nice(current); 2074 long r; 2075 DEFINE_TORTURE_RANDOM(rand); 2076 ktime_t toggle_delay; 2077 unsigned long toggle_fuzz; 2078 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2079 2080 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2081 while (!rcu_inkernel_boot_has_ended()) 2082 schedule_timeout_interruptible(HZ / 10); 2083 for_each_online_cpu(cpu) 2084 maxcpu = cpu; 2085 WARN_ON(maxcpu < 0); 2086 if (toggle_interval > ULONG_MAX) 2087 toggle_fuzz = ULONG_MAX >> 3; 2088 else 2089 toggle_fuzz = toggle_interval >> 3; 2090 if (toggle_fuzz <= 0) 2091 toggle_fuzz = NSEC_PER_USEC; 2092 do { 2093 r = torture_random(&rand); 2094 cpu = (r >> 4) % (maxcpu + 1); 2095 if (r & 0x1) { 2096 rcu_nocb_cpu_offload(cpu); 2097 atomic_long_inc(&n_nocb_offload); 2098 } else { 2099 rcu_nocb_cpu_deoffload(cpu); 2100 atomic_long_inc(&n_nocb_deoffload); 2101 } 2102 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2103 set_current_state(TASK_INTERRUPTIBLE); 2104 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2105 if (stutter_wait("rcu_nocb_toggle")) 2106 sched_set_normal(current, oldnice); 2107 } while (!torture_must_stop()); 2108 torture_kthread_stopping("rcu_nocb_toggle"); 2109 return 0; 2110 } 2111 2112 /* 2113 * Print torture statistics. Caller must ensure that there is only 2114 * one call to this function at a given time!!! This is normally 2115 * accomplished by relying on the module system to only have one copy 2116 * of the module loaded, and then by giving the rcu_torture_stats 2117 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2118 * thread is not running). 2119 */ 2120 static void 2121 rcu_torture_stats_print(void) 2122 { 2123 int cpu; 2124 int i; 2125 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2126 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2127 struct rcu_torture *rtcp; 2128 static unsigned long rtcv_snap = ULONG_MAX; 2129 static bool splatted; 2130 struct task_struct *wtp; 2131 2132 for_each_possible_cpu(cpu) { 2133 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2134 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2135 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2136 } 2137 } 2138 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2139 if (pipesummary[i] != 0) 2140 break; 2141 } 2142 2143 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2144 rtcp = rcu_access_pointer(rcu_torture_current); 2145 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2146 rtcp, 2147 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2148 rcu_torture_current_version, 2149 list_empty(&rcu_torture_freelist), 2150 atomic_read(&n_rcu_torture_alloc), 2151 atomic_read(&n_rcu_torture_alloc_fail), 2152 atomic_read(&n_rcu_torture_free)); 2153 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", 2154 atomic_read(&n_rcu_torture_mberror), 2155 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2156 n_rcu_torture_barrier_error, 2157 n_rcu_torture_boost_ktrerror, 2158 n_rcu_torture_boost_rterror); 2159 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2160 n_rcu_torture_boost_failure, 2161 n_rcu_torture_boosts, 2162 atomic_long_read(&n_rcu_torture_timers)); 2163 torture_onoff_stats(); 2164 pr_cont("barrier: %ld/%ld:%ld ", 2165 data_race(n_barrier_successes), 2166 data_race(n_barrier_attempts), 2167 data_race(n_rcu_torture_barrier_error)); 2168 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2169 pr_cont("nocb-toggles: %ld:%ld\n", 2170 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2171 2172 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2173 if (atomic_read(&n_rcu_torture_mberror) || 2174 atomic_read(&n_rcu_torture_mbchk_fail) || 2175 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2176 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 2177 i > 1) { 2178 pr_cont("%s", "!!! "); 2179 atomic_inc(&n_rcu_torture_error); 2180 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2181 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2182 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2183 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2184 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 2185 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2186 WARN_ON_ONCE(i > 1); // Too-short grace period 2187 } 2188 pr_cont("Reader Pipe: "); 2189 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2190 pr_cont(" %ld", pipesummary[i]); 2191 pr_cont("\n"); 2192 2193 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2194 pr_cont("Reader Batch: "); 2195 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2196 pr_cont(" %ld", batchsummary[i]); 2197 pr_cont("\n"); 2198 2199 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2200 pr_cont("Free-Block Circulation: "); 2201 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2202 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2203 } 2204 pr_cont("\n"); 2205 2206 if (cur_ops->stats) 2207 cur_ops->stats(); 2208 if (rtcv_snap == rcu_torture_current_version && 2209 rcu_access_pointer(rcu_torture_current) && 2210 !rcu_stall_is_suppressed()) { 2211 int __maybe_unused flags = 0; 2212 unsigned long __maybe_unused gp_seq = 0; 2213 2214 rcutorture_get_gp_data(cur_ops->ttype, 2215 &flags, &gp_seq); 2216 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 2217 &flags, &gp_seq); 2218 wtp = READ_ONCE(writer_task); 2219 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2220 rcu_torture_writer_state_getname(), 2221 rcu_torture_writer_state, gp_seq, flags, 2222 wtp == NULL ? ~0U : wtp->__state, 2223 wtp == NULL ? 
-1 : (int)task_cpu(wtp)); 2224 if (!splatted && wtp) { 2225 sched_show_task(wtp); 2226 splatted = true; 2227 } 2228 if (cur_ops->gp_kthread_dbg) 2229 cur_ops->gp_kthread_dbg(); 2230 rcu_ftrace_dump(DUMP_ALL); 2231 } 2232 rtcv_snap = rcu_torture_current_version; 2233 } 2234 2235 /* 2236 * Periodically prints torture statistics, if periodic statistics printing 2237 * was specified via the stat_interval module parameter. 2238 */ 2239 static int 2240 rcu_torture_stats(void *arg) 2241 { 2242 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2243 do { 2244 schedule_timeout_interruptible(stat_interval * HZ); 2245 rcu_torture_stats_print(); 2246 torture_shutdown_absorb("rcu_torture_stats"); 2247 } while (!torture_must_stop()); 2248 torture_kthread_stopping("rcu_torture_stats"); 2249 return 0; 2250 } 2251 2252 /* Test mem_dump_obj() and friends. */ 2253 static void rcu_torture_mem_dump_obj(void) 2254 { 2255 struct rcu_head *rhp; 2256 struct kmem_cache *kcp; 2257 static int z; 2258 2259 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2260 if (WARN_ON_ONCE(!kcp)) 2261 return; 2262 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2263 if (WARN_ON_ONCE(!rhp)) { 2264 kmem_cache_destroy(kcp); 2265 return; 2266 } 2267 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2268 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2269 mem_dump_obj(ZERO_SIZE_PTR); 2270 pr_alert("mem_dump_obj(NULL):"); 2271 mem_dump_obj(NULL); 2272 pr_alert("mem_dump_obj(%px):", &rhp); 2273 mem_dump_obj(&rhp); 2274 pr_alert("mem_dump_obj(%px):", rhp); 2275 mem_dump_obj(rhp); 2276 pr_alert("mem_dump_obj(%px):", &rhp->func); 2277 mem_dump_obj(&rhp->func); 2278 pr_alert("mem_dump_obj(%px):", &z); 2279 mem_dump_obj(&z); 2280 kmem_cache_free(kcp, rhp); 2281 kmem_cache_destroy(kcp); 2282 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2283 if (WARN_ON_ONCE(!rhp)) 2284 return; 2285 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2286 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2287 mem_dump_obj(rhp); 2288 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2289 mem_dump_obj(&rhp->func); 2290 kfree(rhp); 2291 rhp = vmalloc(4096); 2292 if (WARN_ON_ONCE(!rhp)) 2293 return; 2294 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2295 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2296 mem_dump_obj(rhp); 2297 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2298 mem_dump_obj(&rhp->func); 2299 vfree(rhp); 2300 } 2301 2302 static void 2303 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2304 { 2305 pr_alert("%s" TORTURE_FLAG 2306 "--- %s: nreaders=%d nfakewriters=%d " 2307 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2308 "shuffle_interval=%d stutter=%d irqreader=%d " 2309 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2310 "test_boost=%d/%d test_boost_interval=%d " 2311 "test_boost_duration=%d shutdown_secs=%d " 2312 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2313 "stall_cpu_block=%d " 2314 "n_barrier_cbs=%d " 2315 "onoff_interval=%d onoff_holdoff=%d " 2316 "read_exit_delay=%d read_exit_burst=%d " 2317 "nocbs_nthreads=%d nocbs_toggle=%d\n", 2318 torture_type, tag, nrealreaders, nfakewriters, 2319 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2320 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2321 test_boost, cur_ops->can_boost, 2322 test_boost_interval, 
test_boost_duration, shutdown_secs, 2323 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2324 stall_cpu_block, 2325 n_barrier_cbs, 2326 onoff_interval, onoff_holdoff, 2327 read_exit_delay, read_exit_burst, 2328 nocbs_nthreads, nocbs_toggle); 2329 } 2330 2331 static int rcutorture_booster_cleanup(unsigned int cpu) 2332 { 2333 struct task_struct *t; 2334 2335 if (boost_tasks[cpu] == NULL) 2336 return 0; 2337 mutex_lock(&boost_mutex); 2338 t = boost_tasks[cpu]; 2339 boost_tasks[cpu] = NULL; 2340 rcu_torture_enable_rt_throttle(); 2341 mutex_unlock(&boost_mutex); 2342 2343 /* This must be outside of the mutex, otherwise deadlock! */ 2344 torture_stop_kthread(rcu_torture_boost, t); 2345 return 0; 2346 } 2347 2348 static int rcutorture_booster_init(unsigned int cpu) 2349 { 2350 int retval; 2351 2352 if (boost_tasks[cpu] != NULL) 2353 return 0; /* Already created, nothing more to do. */ 2354 2355 // Testing RCU priority boosting requires rcutorture do 2356 // some serious abuse. Counter this by running ksoftirqd 2357 // at higher priority. 2358 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2359 struct sched_param sp; 2360 struct task_struct *t; 2361 2362 t = per_cpu(ksoftirqd, cpu); 2363 WARN_ON_ONCE(!t); 2364 sp.sched_priority = 2; 2365 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2366 } 2367 2368 /* Don't allow time recalculation while creating a new task. */ 2369 mutex_lock(&boost_mutex); 2370 rcu_torture_disable_rt_throttle(); 2371 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2372 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2373 cpu, "rcu_torture_boost_%u"); 2374 if (IS_ERR(boost_tasks[cpu])) { 2375 retval = PTR_ERR(boost_tasks[cpu]); 2376 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2377 n_rcu_torture_boost_ktrerror++; 2378 boost_tasks[cpu] = NULL; 2379 mutex_unlock(&boost_mutex); 2380 return retval; 2381 } 2382 mutex_unlock(&boost_mutex); 2383 return 0; 2384 } 2385 2386 /* 2387 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2388 * induces a CPU stall for the time specified by stall_cpu. 2389 */ 2390 static int rcu_torture_stall(void *args) 2391 { 2392 int idx; 2393 unsigned long stop_at; 2394 2395 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2396 if (stall_cpu_holdoff > 0) { 2397 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2398 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2399 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2400 } 2401 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2402 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2403 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2404 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2405 if (kthread_should_stop()) 2406 break; 2407 schedule_timeout_uninterruptible(HZ); 2408 } 2409 } 2410 if (!kthread_should_stop() && stall_cpu > 0) { 2411 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2412 stop_at = ktime_get_seconds() + stall_cpu; 2413 /* RCU CPU stall is expected behavior in following code. 
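		 * The stall below spins for stall_cpu seconds with
		 * cur_ops->readlock() held, with interrupts disabled if
		 * stall_cpu_irqsoff is set, blocking if stall_cpu_block is
		 * set, and otherwise with preemption disabled, so any
		 * resulting RCU CPU stall warnings are intentional.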
*/ 2414 idx = cur_ops->readlock(); 2415 if (stall_cpu_irqsoff) 2416 local_irq_disable(); 2417 else if (!stall_cpu_block) 2418 preempt_disable(); 2419 pr_alert("%s start on CPU %d.\n", 2420 __func__, raw_smp_processor_id()); 2421 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 2422 stop_at)) 2423 if (stall_cpu_block) { 2424 #ifdef CONFIG_PREEMPTION 2425 preempt_schedule(); 2426 #else 2427 schedule_timeout_uninterruptible(HZ); 2428 #endif 2429 } else if (stall_no_softlockup) { 2430 touch_softlockup_watchdog(); 2431 } 2432 if (stall_cpu_irqsoff) 2433 local_irq_enable(); 2434 else if (!stall_cpu_block) 2435 preempt_enable(); 2436 cur_ops->readunlock(idx); 2437 } 2438 pr_alert("%s end.\n", __func__); 2439 torture_shutdown_absorb("rcu_torture_stall"); 2440 while (!kthread_should_stop()) 2441 schedule_timeout_interruptible(10 * HZ); 2442 return 0; 2443 } 2444 2445 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2446 static int __init rcu_torture_stall_init(void) 2447 { 2448 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2449 return 0; 2450 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2451 } 2452 2453 /* State structure for forward-progress self-propagating RCU callback. */ 2454 struct fwd_cb_state { 2455 struct rcu_head rh; 2456 int stop; 2457 }; 2458 2459 /* 2460 * Forward-progress self-propagating RCU callback function. Because 2461 * callbacks run from softirq, this function is an implicit RCU read-side 2462 * critical section. 2463 */ 2464 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2465 { 2466 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2467 2468 if (READ_ONCE(fcsp->stop)) { 2469 WRITE_ONCE(fcsp->stop, 2); 2470 return; 2471 } 2472 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2473 } 2474 2475 /* State for continuous-flood RCU callbacks. */ 2476 struct rcu_fwd_cb { 2477 struct rcu_head rh; 2478 struct rcu_fwd_cb *rfc_next; 2479 struct rcu_fwd *rfc_rfp; 2480 int rfc_gps; 2481 }; 2482 2483 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2484 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2485 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2486 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 2487 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2488 2489 struct rcu_launder_hist { 2490 long n_launders; 2491 unsigned long launder_gp_seq; 2492 }; 2493 2494 struct rcu_fwd { 2495 spinlock_t rcu_fwd_lock; 2496 struct rcu_fwd_cb *rcu_fwd_cb_head; 2497 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2498 long n_launders_cb; 2499 unsigned long rcu_fwd_startat; 2500 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2501 unsigned long rcu_launder_gp_seq_start; 2502 int rcu_fwd_id; 2503 }; 2504 2505 static DEFINE_MUTEX(rcu_fwd_mutex); 2506 static struct rcu_fwd *rcu_fwds; 2507 static unsigned long rcu_fwd_seq; 2508 static atomic_long_t rcu_fwd_max_cbs; 2509 static bool rcu_fwd_emergency_stop; 2510 2511 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2512 { 2513 unsigned long gps; 2514 unsigned long gps_old; 2515 int i; 2516 int j; 2517 2518 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2519 if (rfp->n_launders_hist[i].n_launders > 0) 2520 break; 2521 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2522 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2523 gps_old = rfp->rcu_launder_gp_seq_start; 2524 for (j = 0; j <= i; j++) { 2525 gps = rfp->n_launders_hist[j].launder_gp_seq; 2526 pr_cont(" %ds/%d: %ld:%ld", 2527 j + 1, FWD_CBS_HIST_DIV, 2528 rfp->n_launders_hist[j].n_launders, 2529 rcutorture_seq_diff(gps, gps_old)); 2530 gps_old = gps; 2531 } 2532 pr_cont("\n"); 2533 } 2534 2535 /* Callback function for continuous-flood RCU callbacks. */ 2536 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2537 { 2538 unsigned long flags; 2539 int i; 2540 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2541 struct rcu_fwd_cb **rfcpp; 2542 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2543 2544 rfcp->rfc_next = NULL; 2545 rfcp->rfc_gps++; 2546 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2547 rfcpp = rfp->rcu_fwd_cb_tail; 2548 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2549 WRITE_ONCE(*rfcpp, rfcp); 2550 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2551 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2552 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2553 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2554 rfp->n_launders_hist[i].n_launders++; 2555 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2556 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2557 } 2558 2559 // Give the scheduler a chance, even on nohz_full CPUs. 2560 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2561 { 2562 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2563 // Real call_rcu() floods hit userspace, so emulate that. 2564 if (need_resched() || (iter & 0xfff)) 2565 schedule(); 2566 return; 2567 } 2568 // No userspace emulation: CB invocation throttles call_rcu() 2569 cond_resched(); 2570 } 2571 2572 /* 2573 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2574 * test is over or because we hit an OOM event. 
2575 */ 2576 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2577 { 2578 unsigned long flags; 2579 unsigned long freed = 0; 2580 struct rcu_fwd_cb *rfcp; 2581 2582 for (;;) { 2583 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2584 rfcp = rfp->rcu_fwd_cb_head; 2585 if (!rfcp) { 2586 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2587 break; 2588 } 2589 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2590 if (!rfp->rcu_fwd_cb_head) 2591 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2592 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2593 kfree(rfcp); 2594 freed++; 2595 rcu_torture_fwd_prog_cond_resched(freed); 2596 if (tick_nohz_full_enabled()) { 2597 local_irq_save(flags); 2598 rcu_momentary_dyntick_idle(); 2599 local_irq_restore(flags); 2600 } 2601 } 2602 return freed; 2603 } 2604 2605 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2606 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2607 int *tested, int *tested_tries) 2608 { 2609 unsigned long cver; 2610 unsigned long dur; 2611 struct fwd_cb_state fcs; 2612 unsigned long gps; 2613 int idx; 2614 int sd; 2615 int sd4; 2616 bool selfpropcb = false; 2617 unsigned long stopat; 2618 static DEFINE_TORTURE_RANDOM(trs); 2619 2620 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2621 if (!cur_ops->sync) 2622 return; // Cannot do need_resched() forward progress testing without ->sync. 2623 if (cur_ops->call && cur_ops->cb_barrier) { 2624 init_rcu_head_on_stack(&fcs.rh); 2625 selfpropcb = true; 2626 } 2627 2628 /* Tight loop containing cond_resched(). */ 2629 atomic_inc(&rcu_fwd_cb_nodelay); 2630 cur_ops->sync(); /* Later readers see above write. */ 2631 if (selfpropcb) { 2632 WRITE_ONCE(fcs.stop, 0); 2633 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2634 } 2635 cver = READ_ONCE(rcu_torture_current_version); 2636 gps = cur_ops->get_gp_seq(); 2637 sd = cur_ops->stall_dur() + 1; 2638 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2639 dur = sd4 + torture_random(&trs) % (sd - sd4); 2640 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2641 stopat = rfp->rcu_fwd_startat + dur; 2642 while (time_before(jiffies, stopat) && 2643 !shutdown_time_arrived() && 2644 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2645 idx = cur_ops->readlock(); 2646 udelay(10); 2647 cur_ops->readunlock(idx); 2648 if (!fwd_progress_need_resched || need_resched()) 2649 cond_resched(); 2650 } 2651 (*tested_tries)++; 2652 if (!time_before(jiffies, stopat) && 2653 !shutdown_time_arrived() && 2654 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2655 (*tested)++; 2656 cver = READ_ONCE(rcu_torture_current_version) - cver; 2657 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2658 WARN_ON(!cver && gps < 2); 2659 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2660 rfp->rcu_fwd_id, dur, cver, gps); 2661 } 2662 if (selfpropcb) { 2663 WRITE_ONCE(fcs.stop, 1); 2664 cur_ops->sync(); /* Wait for running CB to complete. */ 2665 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2666 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2667 } 2668 2669 if (selfpropcb) { 2670 WARN_ON(READ_ONCE(fcs.stop) != 2); 2671 destroy_rcu_head_on_stack(&fcs.rh); 2672 } 2673 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2674 atomic_dec(&rcu_fwd_cb_nodelay); 2675 } 2676 2677 /* Carry out call_rcu() forward-progress testing. 
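 * Each invocation of rcu_torture_fwd_cb_cr() counts one "launder" for its
 * callback (rfc_gps) and puts it back on the list, and the loop below
 * immediately re-posts it with cur_ops->call().  The flood runs until
 * MAX_FWD_CB_JIFFIES elapse or, roughly, until MIN_FWD_CBS_LAUNDERED
 * callbacks have each been laundered at least MIN_FWD_CB_LAUNDERS times,
 * demonstrating grace-period progress under callback-flood load.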
*/ 2678 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2679 { 2680 unsigned long cver; 2681 unsigned long flags; 2682 unsigned long gps; 2683 int i; 2684 long n_launders; 2685 long n_launders_cb_snap; 2686 long n_launders_sa; 2687 long n_max_cbs; 2688 long n_max_gps; 2689 struct rcu_fwd_cb *rfcp; 2690 struct rcu_fwd_cb *rfcpn; 2691 unsigned long stopat; 2692 unsigned long stoppedat; 2693 2694 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2695 if (READ_ONCE(rcu_fwd_emergency_stop)) 2696 return; /* Get out of the way quickly, no GP wait! */ 2697 if (!cur_ops->call) 2698 return; /* Can't do call_rcu() fwd prog without ->call. */ 2699 2700 /* Loop continuously posting RCU callbacks. */ 2701 atomic_inc(&rcu_fwd_cb_nodelay); 2702 cur_ops->sync(); /* Later readers see above write. */ 2703 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2704 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2705 n_launders = 0; 2706 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2707 n_launders_sa = 0; 2708 n_max_cbs = 0; 2709 n_max_gps = 0; 2710 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2711 rfp->n_launders_hist[i].n_launders = 0; 2712 cver = READ_ONCE(rcu_torture_current_version); 2713 gps = cur_ops->get_gp_seq(); 2714 rfp->rcu_launder_gp_seq_start = gps; 2715 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2716 while (time_before(jiffies, stopat) && 2717 !shutdown_time_arrived() && 2718 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2719 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2720 rfcpn = NULL; 2721 if (rfcp) 2722 rfcpn = READ_ONCE(rfcp->rfc_next); 2723 if (rfcpn) { 2724 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2725 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2726 break; 2727 rfp->rcu_fwd_cb_head = rfcpn; 2728 n_launders++; 2729 n_launders_sa++; 2730 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2731 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2732 if (WARN_ON_ONCE(!rfcp)) { 2733 schedule_timeout_interruptible(1); 2734 continue; 2735 } 2736 n_max_cbs++; 2737 n_launders_sa = 0; 2738 rfcp->rfc_gps = 0; 2739 rfcp->rfc_rfp = rfp; 2740 } else { 2741 rfcp = NULL; 2742 } 2743 if (rfcp) 2744 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2745 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2746 if (tick_nohz_full_enabled()) { 2747 local_irq_save(flags); 2748 rcu_momentary_dyntick_idle(); 2749 local_irq_restore(flags); 2750 } 2751 } 2752 stoppedat = jiffies; 2753 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2754 cver = READ_ONCE(rcu_torture_current_version) - cver; 2755 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2756 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2757 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2758 (void)rcu_torture_fwd_prog_cbfree(rfp); 2759 2760 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2761 !shutdown_time_arrived()) { 2762 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2763 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2764 __func__, 2765 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2766 n_launders + n_max_cbs - n_launders_cb_snap, 2767 n_launders, n_launders_sa, 2768 n_max_gps, n_max_cbs, cver, gps); 2769 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2770 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 
2771 rcu_torture_fwd_cb_hist(rfp); 2772 mutex_unlock(&rcu_fwd_mutex); 2773 } 2774 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2775 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2776 atomic_dec(&rcu_fwd_cb_nodelay); 2777 } 2778 2779 2780 /* 2781 * OOM notifier, but this only prints diagnostic information for the 2782 * current forward-progress test. 2783 */ 2784 static int rcutorture_oom_notify(struct notifier_block *self, 2785 unsigned long notused, void *nfreed) 2786 { 2787 int i; 2788 long ncbs; 2789 struct rcu_fwd *rfp; 2790 2791 mutex_lock(&rcu_fwd_mutex); 2792 rfp = rcu_fwds; 2793 if (!rfp) { 2794 mutex_unlock(&rcu_fwd_mutex); 2795 return NOTIFY_OK; 2796 } 2797 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2798 __func__); 2799 for (i = 0; i < fwd_progress; i++) { 2800 rcu_torture_fwd_cb_hist(&rfp[i]); 2801 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2802 } 2803 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2804 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2805 ncbs = 0; 2806 for (i = 0; i < fwd_progress; i++) 2807 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2808 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2809 cur_ops->cb_barrier(); 2810 ncbs = 0; 2811 for (i = 0; i < fwd_progress; i++) 2812 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2813 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2814 cur_ops->cb_barrier(); 2815 ncbs = 0; 2816 for (i = 0; i < fwd_progress; i++) 2817 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2818 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2819 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2820 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2821 pr_info("%s returning after OOM processing.\n", __func__); 2822 mutex_unlock(&rcu_fwd_mutex); 2823 return NOTIFY_OK; 2824 } 2825 2826 static struct notifier_block rcutorture_oom_nb = { 2827 .notifier_call = rcutorture_oom_notify 2828 }; 2829 2830 /* Carry out grace-period forward-progress testing. 
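 * When fwd_progress requests more than one kthread, the instance with
 * rcu_fwd_id == 0 acts as the leader: it observes the holdoff, clears
 * rcu_fwd_emergency_stop, reports the previous pass's maximum callback
 * count, and advances rcu_fwd_seq, while the other instances wait for
 * rcu_fwd_seq to change before starting their own pass.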
*/ 2831 static int rcu_torture_fwd_prog(void *args) 2832 { 2833 bool firsttime = true; 2834 long max_cbs; 2835 int oldnice = task_nice(current); 2836 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2837 struct rcu_fwd *rfp = args; 2838 int tested = 0; 2839 int tested_tries = 0; 2840 2841 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2842 rcu_bind_current_to_nocb(); 2843 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2844 set_user_nice(current, MAX_NICE); 2845 do { 2846 if (!rfp->rcu_fwd_id) { 2847 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2848 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2849 if (!firsttime) { 2850 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2851 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2852 } 2853 firsttime = false; 2854 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2855 } else { 2856 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2857 schedule_timeout_interruptible(1); 2858 oldseq = READ_ONCE(rcu_fwd_seq); 2859 } 2860 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2861 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2862 rcu_torture_fwd_prog_cr(rfp); 2863 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2864 (!IS_ENABLED(CONFIG_TINY_RCU) || 2865 (rcu_inkernel_boot_has_ended() && 2866 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2867 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2868 2869 /* Avoid slow periods, better to test when busy. */ 2870 if (stutter_wait("rcu_torture_fwd_prog")) 2871 sched_set_normal(current, oldnice); 2872 } while (!torture_must_stop()); 2873 /* Short runs might not contain a valid forward-progress attempt. */ 2874 if (!rfp->rcu_fwd_id) { 2875 WARN_ON(!tested && tested_tries >= 5); 2876 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2877 } 2878 torture_kthread_stopping("rcu_torture_fwd_prog"); 2879 return 0; 2880 } 2881 2882 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2883 static int __init rcu_torture_fwd_prog_init(void) 2884 { 2885 int i; 2886 int ret = 0; 2887 struct rcu_fwd *rfp; 2888 2889 if (!fwd_progress) 2890 return 0; /* Not requested, so don't do it. */ 2891 if (fwd_progress >= nr_cpu_ids) { 2892 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 2893 fwd_progress = nr_cpu_ids; 2894 } else if (fwd_progress < 0) { 2895 fwd_progress = nr_cpu_ids; 2896 } 2897 if ((!cur_ops->sync && !cur_ops->call) || 2898 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2899 cur_ops == &rcu_busted_ops) { 2900 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2901 fwd_progress = 0; 2902 return 0; 2903 } 2904 if (stall_cpu > 0) { 2905 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2906 fwd_progress = 0; 2907 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2908 return -EINVAL; /* In module, can fail back to user. */ 2909 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 2910 return 0; 2911 } 2912 if (fwd_progress_holdoff <= 0) 2913 fwd_progress_holdoff = 1; 2914 if (fwd_progress_div <= 0) 2915 fwd_progress_div = 4; 2916 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 2917 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 2918 if (!rfp || !fwd_prog_tasks) { 2919 kfree(rfp); 2920 kfree(fwd_prog_tasks); 2921 fwd_prog_tasks = NULL; 2922 fwd_progress = 0; 2923 return -ENOMEM; 2924 } 2925 for (i = 0; i < fwd_progress; i++) { 2926 spin_lock_init(&rfp[i].rcu_fwd_lock); 2927 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 2928 rfp[i].rcu_fwd_id = i; 2929 } 2930 mutex_lock(&rcu_fwd_mutex); 2931 rcu_fwds = rfp; 2932 mutex_unlock(&rcu_fwd_mutex); 2933 register_oom_notifier(&rcutorture_oom_nb); 2934 for (i = 0; i < fwd_progress; i++) { 2935 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 2936 if (ret) { 2937 fwd_progress = i; 2938 return ret; 2939 } 2940 } 2941 return 0; 2942 } 2943 2944 static void rcu_torture_fwd_prog_cleanup(void) 2945 { 2946 int i; 2947 struct rcu_fwd *rfp; 2948 2949 if (!rcu_fwds || !fwd_prog_tasks) 2950 return; 2951 for (i = 0; i < fwd_progress; i++) 2952 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 2953 unregister_oom_notifier(&rcutorture_oom_nb); 2954 mutex_lock(&rcu_fwd_mutex); 2955 rfp = rcu_fwds; 2956 rcu_fwds = NULL; 2957 mutex_unlock(&rcu_fwd_mutex); 2958 kfree(rfp); 2959 kfree(fwd_prog_tasks); 2960 fwd_prog_tasks = NULL; 2961 } 2962 2963 /* Callback function for RCU barrier testing. */ 2964 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2965 { 2966 atomic_inc(&barrier_cbs_invoked); 2967 } 2968 2969 /* IPI handler to get callback posted on desired CPU, if online. */ 2970 static void rcu_torture_barrier1cb(void *rcu_void) 2971 { 2972 struct rcu_head *rhp = rcu_void; 2973 2974 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2975 } 2976 2977 /* kthread function to register callbacks used to test RCU barriers. */ 2978 static int rcu_torture_barrier_cbs(void *arg) 2979 { 2980 long myid = (long)arg; 2981 bool lastphase = false; 2982 bool newphase; 2983 struct rcu_head rcu; 2984 2985 init_rcu_head_on_stack(&rcu); 2986 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2987 set_user_nice(current, MAX_NICE); 2988 do { 2989 wait_event(barrier_cbs_wq[myid], 2990 (newphase = 2991 smp_load_acquire(&barrier_phase)) != lastphase || 2992 torture_must_stop()); 2993 lastphase = newphase; 2994 if (torture_must_stop()) 2995 break; 2996 /* 2997 * The above smp_load_acquire() ensures barrier_phase load 2998 * is ordered before the following ->call(). 2999 */ 3000 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 3001 &rcu, 1)) { 3002 // IPI failed, so use direct call from current CPU. 3003 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3004 } 3005 if (atomic_dec_and_test(&barrier_cbs_count)) 3006 wake_up(&barrier_wq); 3007 } while (!torture_must_stop()); 3008 if (cur_ops->cb_barrier != NULL) 3009 cur_ops->cb_barrier(); 3010 destroy_rcu_head_on_stack(&rcu); 3011 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3012 return 0; 3013 } 3014 3015 /* kthread function to drive and coordinate RCU barrier testing. */ 3016 static int rcu_torture_barrier(void *arg) 3017 { 3018 int i; 3019 3020 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3021 do { 3022 atomic_set(&barrier_cbs_invoked, 0); 3023 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3024 /* Ensure barrier_phase ordered after prior assignments. 
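		 * That is, the smp_store_release() below pairs with the
		 * smp_load_acquire() of barrier_phase in
		 * rcu_torture_barrier_cbs(), so each callback kthread sees the
		 * freshly reset barrier_cbs_count and barrier_cbs_invoked
		 * before posting its callback for the new phase.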
*/ 3025 smp_store_release(&barrier_phase, !barrier_phase); 3026 for (i = 0; i < n_barrier_cbs; i++) 3027 wake_up(&barrier_cbs_wq[i]); 3028 wait_event(barrier_wq, 3029 atomic_read(&barrier_cbs_count) == 0 || 3030 torture_must_stop()); 3031 if (torture_must_stop()) 3032 break; 3033 n_barrier_attempts++; 3034 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3035 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3036 n_rcu_torture_barrier_error++; 3037 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3038 atomic_read(&barrier_cbs_invoked), 3039 n_barrier_cbs); 3040 WARN_ON(1); 3041 // Wait manually for the remaining callbacks 3042 i = 0; 3043 do { 3044 if (WARN_ON(i++ > HZ)) 3045 i = INT_MIN; 3046 schedule_timeout_interruptible(1); 3047 cur_ops->cb_barrier(); 3048 } while (atomic_read(&barrier_cbs_invoked) != 3049 n_barrier_cbs && 3050 !torture_must_stop()); 3051 smp_mb(); // Can't trust ordering if broken. 3052 if (!torture_must_stop()) 3053 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3054 atomic_read(&barrier_cbs_invoked)); 3055 } else { 3056 n_barrier_successes++; 3057 } 3058 schedule_timeout_interruptible(HZ / 10); 3059 } while (!torture_must_stop()); 3060 torture_kthread_stopping("rcu_torture_barrier"); 3061 return 0; 3062 } 3063 3064 /* Initialize RCU barrier testing. */ 3065 static int rcu_torture_barrier_init(void) 3066 { 3067 int i; 3068 int ret; 3069 3070 if (n_barrier_cbs <= 0) 3071 return 0; 3072 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3073 pr_alert("%s" TORTURE_FLAG 3074 " Call or barrier ops missing for %s,\n", 3075 torture_type, cur_ops->name); 3076 pr_alert("%s" TORTURE_FLAG 3077 " RCU barrier testing omitted from run.\n", 3078 torture_type); 3079 return 0; 3080 } 3081 atomic_set(&barrier_cbs_count, 0); 3082 atomic_set(&barrier_cbs_invoked, 0); 3083 barrier_cbs_tasks = 3084 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3085 GFP_KERNEL); 3086 barrier_cbs_wq = 3087 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3088 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3089 return -ENOMEM; 3090 for (i = 0; i < n_barrier_cbs; i++) { 3091 init_waitqueue_head(&barrier_cbs_wq[i]); 3092 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3093 (void *)(long)i, 3094 barrier_cbs_tasks[i]); 3095 if (ret) 3096 return ret; 3097 } 3098 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3099 } 3100 3101 /* Clean up after RCU barrier testing. */ 3102 static void rcu_torture_barrier_cleanup(void) 3103 { 3104 int i; 3105 3106 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3107 if (barrier_cbs_tasks != NULL) { 3108 for (i = 0; i < n_barrier_cbs; i++) 3109 torture_stop_kthread(rcu_torture_barrier_cbs, 3110 barrier_cbs_tasks[i]); 3111 kfree(barrier_cbs_tasks); 3112 barrier_cbs_tasks = NULL; 3113 } 3114 if (barrier_cbs_wq != NULL) { 3115 kfree(barrier_cbs_wq); 3116 barrier_cbs_wq = NULL; 3117 } 3118 } 3119 3120 static bool rcu_torture_can_boost(void) 3121 { 3122 static int boost_warn_once; 3123 int prio; 3124 3125 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3126 return false; 3127 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3128 return false; 3129 3130 prio = rcu_get_gp_kthreads_prio(); 3131 if (!prio) 3132 return false; 3133 3134 if (prio < 2) { 3135 if (boost_warn_once == 1) 3136 return false; 3137 3138 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3139 boost_warn_once = 1; 3140 return false; 3141 } 3142 3143 return true; 3144 } 3145 3146 static bool read_exit_child_stop; 3147 static bool read_exit_child_stopped; 3148 static wait_queue_head_t read_exit_wq; 3149 3150 // Child kthread which just does an rcutorture reader and exits. 3151 static int rcu_torture_read_exit_child(void *trsp_in) 3152 { 3153 struct torture_random_state *trsp = trsp_in; 3154 3155 set_user_nice(current, MAX_NICE); 3156 // Minimize time between reading and exiting. 3157 while (!kthread_should_stop()) 3158 schedule_timeout_uninterruptible(1); 3159 (void)rcu_torture_one_read(trsp, -1); 3160 return 0; 3161 } 3162 3163 // Parent kthread which creates and destroys read-exit child kthreads. 3164 static int rcu_torture_read_exit(void *unused) 3165 { 3166 bool errexit = false; 3167 int i; 3168 struct task_struct *tsp; 3169 DEFINE_TORTURE_RANDOM(trs); 3170 3171 // Allocate and initialize. 3172 set_user_nice(current, MAX_NICE); 3173 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3174 3175 // Each pass through this loop does one read-exit episode. 3176 do { 3177 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3178 for (i = 0; i < read_exit_burst; i++) { 3179 if (READ_ONCE(read_exit_child_stop)) 3180 break; 3181 stutter_wait("rcu_torture_read_exit"); 3182 // Spawn child. 3183 tsp = kthread_run(rcu_torture_read_exit_child, 3184 &trs, "%s", "rcu_torture_read_exit_child"); 3185 if (IS_ERR(tsp)) { 3186 TOROUT_ERRSTRING("out of memory"); 3187 errexit = true; 3188 break; 3189 } 3190 cond_resched(); 3191 kthread_stop(tsp); 3192 n_read_exits++; 3193 } 3194 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3195 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3196 i = 0; 3197 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3198 schedule_timeout_uninterruptible(HZ); 3199 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3200 3201 // Clean up and exit. 3202 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3203 smp_mb(); // Store before wakeup. 3204 wake_up(&read_exit_wq); 3205 while (!torture_must_stop()) 3206 schedule_timeout_uninterruptible(1); 3207 torture_kthread_stopping("rcu_torture_read_exit"); 3208 return 0; 3209 } 3210 3211 static int rcu_torture_read_exit_init(void) 3212 { 3213 if (read_exit_burst <= 0) 3214 return 0; 3215 init_waitqueue_head(&read_exit_wq); 3216 read_exit_child_stop = false; 3217 read_exit_child_stopped = false; 3218 return torture_create_kthread(rcu_torture_read_exit, NULL, 3219 read_exit_task); 3220 } 3221 3222 static void rcu_torture_read_exit_cleanup(void) 3223 { 3224 if (!read_exit_task) 3225 return; 3226 WRITE_ONCE(read_exit_child_stop, true); 3227 smp_mb(); // Above write before wait. 
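	// The smp_load_acquire() below pairs with the smp_store_release() of
	// read_exit_child_stopped in rcu_torture_read_exit(), so all child
	// reaping in that kthread is complete before this cleanup proceeds.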
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		rcu_gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		rcu_gp_slow_unregister(NULL);
		return;
	}

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ?
				"preempted" : "");

		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu(rhp, rcu_torture_leak_cb);
		call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/*
	 * Initialize the statistics so that each run gets its own numbers.
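	 * This clears both the global atomic counters and the per-CPU
	 * pipeline counts, rather than relying on their static initial
	 * values.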
	 */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 0;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ?
			cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
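
/*
 * Example invocation for a modular build (illustrative values only):
 *
 *	modprobe rcutorture torture_type=rcu nreaders=4 stat_interval=30
 *
 * The torture_type string is matched against the ->name fields of the
 * rcu_torture_ops structures listed in rcu_torture_init() above.
 */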