1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/rcu_notifier.h> 25 #include <linux/interrupt.h> 26 #include <linux/sched/signal.h> 27 #include <uapi/linux/sched/types.h> 28 #include <linux/atomic.h> 29 #include <linux/bitops.h> 30 #include <linux/completion.h> 31 #include <linux/moduleparam.h> 32 #include <linux/percpu.h> 33 #include <linux/notifier.h> 34 #include <linux/reboot.h> 35 #include <linux/freezer.h> 36 #include <linux/cpu.h> 37 #include <linux/delay.h> 38 #include <linux/stat.h> 39 #include <linux/srcu.h> 40 #include <linux/slab.h> 41 #include <linux/trace_clock.h> 42 #include <asm/byteorder.h> 43 #include <linux/torture.h> 44 #include <linux/vmalloc.h> 45 #include <linux/sched/debug.h> 46 #include <linux/sched/sysctl.h> 47 #include <linux/oom.h> 48 #include <linux/tick.h> 49 #include <linux/rcupdate_trace.h> 50 #include <linux/nmi.h> 51 52 #include "rcu.h" 53 54 MODULE_LICENSE("GPL"); 55 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 56 57 /* Bits for ->extendables field, extendables param, and related definitions. */ 58 #define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */ 59 #define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1) 60 #define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */ 61 #define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2) 62 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 63 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 64 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 65 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 66 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 67 #define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */ 68 #define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */ 69 #define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */ 70 #define RCUTORTURE_MAX_EXTEND \ 71 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 72 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 73 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 74 /* Must be power of two minus one. 
 */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s"); 119 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 120 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 121 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds."); 122 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds."); 123 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable."); 124 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs"); 125 torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario."); 126 torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); 127 128 static char *torture_type = "rcu"; 129 module_param(torture_type, charp, 0444); 130 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 131 132 static int nrealnocbers; 133 static int nrealreaders; 134 static struct task_struct *writer_task; 135 static struct task_struct **fakewriter_tasks; 136 static struct task_struct **reader_tasks; 137 static struct task_struct **nocb_tasks; 138 static struct task_struct *stats_task; 139 static struct task_struct *fqs_task; 140 static struct task_struct *boost_tasks[NR_CPUS]; 141 static struct task_struct *stall_task; 142 static struct task_struct **fwd_prog_tasks; 143 static struct task_struct **barrier_cbs_tasks; 144 static struct task_struct *barrier_task; 145 static struct task_struct *read_exit_task; 146 147 #define RCU_TORTURE_PIPE_LEN 10 148 149 // Mailbox-like structure to check RCU global memory ordering. 150 struct rcu_torture_reader_check { 151 unsigned long rtc_myloops; 152 int rtc_chkrdr; 153 unsigned long rtc_chkloops; 154 int rtc_ready; 155 struct rcu_torture_reader_check *rtc_assigner; 156 } ____cacheline_internodealigned_in_smp; 157 158 // Update-side data structure used to check RCU readers. 159 struct rcu_torture { 160 struct rcu_head rtort_rcu; 161 int rtort_pipe_count; 162 struct list_head rtort_free; 163 int rtort_mbtest; 164 struct rcu_torture_reader_check *rtort_chkp; 165 }; 166 167 static LIST_HEAD(rcu_torture_freelist); 168 static struct rcu_torture __rcu *rcu_torture_current; 169 static unsigned long rcu_torture_current_version; 170 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 171 static DEFINE_SPINLOCK(rcu_torture_lock); 172 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 173 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 174 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 175 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 176 static atomic_t n_rcu_torture_alloc; 177 static atomic_t n_rcu_torture_alloc_fail; 178 static atomic_t n_rcu_torture_free; 179 static atomic_t n_rcu_torture_mberror; 180 static atomic_t n_rcu_torture_mbchk_fail; 181 static atomic_t n_rcu_torture_mbchk_tries; 182 static atomic_t n_rcu_torture_error; 183 static long n_rcu_torture_barrier_error; 184 static long n_rcu_torture_boost_ktrerror; 185 static long n_rcu_torture_boost_failure; 186 static long n_rcu_torture_boosts; 187 static atomic_long_t n_rcu_torture_timers; 188 static long n_barrier_attempts; 189 static long n_barrier_successes; /* did rcu_barrier test succeed? 
*/ 190 static unsigned long n_read_exits; 191 static struct list_head rcu_torture_removed; 192 static unsigned long shutdown_jiffies; 193 static unsigned long start_gp_seq; 194 static atomic_long_t n_nocb_offload; 195 static atomic_long_t n_nocb_deoffload; 196 197 static int rcu_torture_writer_state; 198 #define RTWS_FIXED_DELAY 0 199 #define RTWS_DELAY 1 200 #define RTWS_REPLACE 2 201 #define RTWS_DEF_FREE 3 202 #define RTWS_EXP_SYNC 4 203 #define RTWS_COND_GET 5 204 #define RTWS_COND_GET_FULL 6 205 #define RTWS_COND_GET_EXP 7 206 #define RTWS_COND_GET_EXP_FULL 8 207 #define RTWS_COND_SYNC 9 208 #define RTWS_COND_SYNC_FULL 10 209 #define RTWS_COND_SYNC_EXP 11 210 #define RTWS_COND_SYNC_EXP_FULL 12 211 #define RTWS_POLL_GET 13 212 #define RTWS_POLL_GET_FULL 14 213 #define RTWS_POLL_GET_EXP 15 214 #define RTWS_POLL_GET_EXP_FULL 16 215 #define RTWS_POLL_WAIT 17 216 #define RTWS_POLL_WAIT_FULL 18 217 #define RTWS_POLL_WAIT_EXP 19 218 #define RTWS_POLL_WAIT_EXP_FULL 20 219 #define RTWS_SYNC 21 220 #define RTWS_STUTTER 22 221 #define RTWS_STOPPING 23 222 static const char * const rcu_torture_writer_state_names[] = { 223 "RTWS_FIXED_DELAY", 224 "RTWS_DELAY", 225 "RTWS_REPLACE", 226 "RTWS_DEF_FREE", 227 "RTWS_EXP_SYNC", 228 "RTWS_COND_GET", 229 "RTWS_COND_GET_FULL", 230 "RTWS_COND_GET_EXP", 231 "RTWS_COND_GET_EXP_FULL", 232 "RTWS_COND_SYNC", 233 "RTWS_COND_SYNC_FULL", 234 "RTWS_COND_SYNC_EXP", 235 "RTWS_COND_SYNC_EXP_FULL", 236 "RTWS_POLL_GET", 237 "RTWS_POLL_GET_FULL", 238 "RTWS_POLL_GET_EXP", 239 "RTWS_POLL_GET_EXP_FULL", 240 "RTWS_POLL_WAIT", 241 "RTWS_POLL_WAIT_FULL", 242 "RTWS_POLL_WAIT_EXP", 243 "RTWS_POLL_WAIT_EXP_FULL", 244 "RTWS_SYNC", 245 "RTWS_STUTTER", 246 "RTWS_STOPPING", 247 }; 248 249 /* Record reader segment types and duration for first failing read. */ 250 struct rt_read_seg { 251 int rt_readstate; 252 unsigned long rt_delay_jiffies; 253 unsigned long rt_delay_ms; 254 unsigned long rt_delay_us; 255 bool rt_preempted; 256 }; 257 static int err_segs_recorded; 258 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 259 static int rt_read_nsegs; 260 261 static const char *rcu_torture_writer_state_getname(void) 262 { 263 unsigned int i = READ_ONCE(rcu_torture_writer_state); 264 265 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 266 return "???"; 267 return rcu_torture_writer_state_names[i]; 268 } 269 270 #ifdef CONFIG_RCU_TRACE 271 static u64 notrace rcu_trace_clock_local(void) 272 { 273 u64 ts = trace_clock_local(); 274 275 (void)do_div(ts, NSEC_PER_USEC); 276 return ts; 277 } 278 #else /* #ifdef CONFIG_RCU_TRACE */ 279 static u64 notrace rcu_trace_clock_local(void) 280 { 281 return 0ULL; 282 } 283 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 284 285 /* 286 * Stop aggressive CPU-hog tests a bit before the end of the test in order 287 * to avoid interfering with test shutdown. 288 */ 289 static bool shutdown_time_arrived(void) 290 { 291 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 292 } 293 294 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 295 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 296 /* and boost task create/destroy. */ 297 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 298 static bool barrier_phase; /* Test phase. */ 299 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 300 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. 
*/ 301 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 302 303 static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 304 305 /* 306 * Allocate an element from the rcu_tortures pool. 307 */ 308 static struct rcu_torture * 309 rcu_torture_alloc(void) 310 { 311 struct list_head *p; 312 313 spin_lock_bh(&rcu_torture_lock); 314 if (list_empty(&rcu_torture_freelist)) { 315 atomic_inc(&n_rcu_torture_alloc_fail); 316 spin_unlock_bh(&rcu_torture_lock); 317 return NULL; 318 } 319 atomic_inc(&n_rcu_torture_alloc); 320 p = rcu_torture_freelist.next; 321 list_del_init(p); 322 spin_unlock_bh(&rcu_torture_lock); 323 return container_of(p, struct rcu_torture, rtort_free); 324 } 325 326 /* 327 * Free an element to the rcu_tortures pool. 328 */ 329 static void 330 rcu_torture_free(struct rcu_torture *p) 331 { 332 atomic_inc(&n_rcu_torture_free); 333 spin_lock_bh(&rcu_torture_lock); 334 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 335 spin_unlock_bh(&rcu_torture_lock); 336 } 337 338 /* 339 * Operations vector for selecting different types of tests. 340 */ 341 342 struct rcu_torture_ops { 343 int ttype; 344 void (*init)(void); 345 void (*cleanup)(void); 346 int (*readlock)(void); 347 void (*read_delay)(struct torture_random_state *rrsp, 348 struct rt_read_seg *rtrsp); 349 void (*readunlock)(int idx); 350 int (*readlock_held)(void); 351 unsigned long (*get_gp_seq)(void); 352 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 353 void (*deferred_free)(struct rcu_torture *p); 354 void (*sync)(void); 355 void (*exp_sync)(void); 356 unsigned long (*get_gp_state_exp)(void); 357 unsigned long (*start_gp_poll_exp)(void); 358 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp); 359 bool (*poll_gp_state_exp)(unsigned long oldstate); 360 void (*cond_sync_exp)(unsigned long oldstate); 361 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp); 362 unsigned long (*get_comp_state)(void); 363 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp); 364 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2); 365 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2); 366 unsigned long (*get_gp_state)(void); 367 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp); 368 unsigned long (*get_gp_completed)(void); 369 void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp); 370 unsigned long (*start_gp_poll)(void); 371 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp); 372 bool (*poll_gp_state)(unsigned long oldstate); 373 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp); 374 bool (*poll_need_2gp)(bool poll, bool poll_full); 375 void (*cond_sync)(unsigned long oldstate); 376 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp); 377 call_rcu_func_t call; 378 void (*cb_barrier)(void); 379 void (*fqs)(void); 380 void (*stats)(void); 381 void (*gp_kthread_dbg)(void); 382 bool (*check_boost_failed)(unsigned long gp_state, int *cpup); 383 int (*stall_dur)(void); 384 long cbflood_max; 385 int irq_capable; 386 int can_boost; 387 int extendables; 388 int slow_gps; 389 int no_pi_lock; 390 const char *name; 391 }; 392 393 static struct rcu_torture_ops *cur_ops; 394 395 /* 396 * Definitions for rcu torture testing. 
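 *
 * Each flavor under test is wired up through a struct rcu_torture_ops
 * instance (see above), and the torture kthreads reach that flavor only
 * through cur_ops.  As a rough sketch of the read-side pattern these hooks
 * express (illustration only: "rand" and "rtseg" stand in for the caller's
 * torture_random_state and rt_read_seg, and the real readers also randomly
 * extend and nest their critical sections):
 *
 *	idx = cur_ops->readlock();		// e.g. rcu_read_lock(), returns 0
 *	p = rcu_dereference(rcu_torture_current);
 *	cur_ops->read_delay(&rand, &rtseg);	// maybe linger in the reader
 *	cur_ops->readunlock(idx);		// e.g. rcu_read_unlock()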
397 */ 398 399 static int torture_readlock_not_held(void) 400 { 401 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); 402 } 403 404 static int rcu_torture_read_lock(void) 405 { 406 rcu_read_lock(); 407 return 0; 408 } 409 410 static void 411 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 412 { 413 unsigned long started; 414 unsigned long completed; 415 const unsigned long shortdelay_us = 200; 416 unsigned long longdelay_ms = 300; 417 unsigned long long ts; 418 419 /* We want a short delay sometimes to make a reader delay the grace 420 * period, and we want a long delay occasionally to trigger 421 * force_quiescent_state. */ 422 423 if (!atomic_read(&rcu_fwd_cb_nodelay) && 424 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 425 started = cur_ops->get_gp_seq(); 426 ts = rcu_trace_clock_local(); 427 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 428 longdelay_ms = 5; /* Avoid triggering BH limits. */ 429 mdelay(longdelay_ms); 430 rtrsp->rt_delay_ms = longdelay_ms; 431 completed = cur_ops->get_gp_seq(); 432 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 433 started, completed); 434 } 435 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 436 udelay(shortdelay_us); 437 rtrsp->rt_delay_us = shortdelay_us; 438 } 439 if (!preempt_count() && 440 !(torture_random(rrsp) % (nrealreaders * 500))) { 441 torture_preempt_schedule(); /* QS only if preemptible. */ 442 rtrsp->rt_preempted = true; 443 } 444 } 445 446 static void rcu_torture_read_unlock(int idx) 447 { 448 rcu_read_unlock(); 449 } 450 451 /* 452 * Update callback in the pipe. This should be invoked after a grace period. 453 */ 454 static bool 455 rcu_torture_pipe_update_one(struct rcu_torture *rp) 456 { 457 int i; 458 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); 459 460 if (rtrcp) { 461 WRITE_ONCE(rp->rtort_chkp, NULL); 462 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). 463 } 464 i = READ_ONCE(rp->rtort_pipe_count); 465 if (i > RCU_TORTURE_PIPE_LEN) 466 i = RCU_TORTURE_PIPE_LEN; 467 atomic_inc(&rcu_torture_wcount[i]); 468 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 469 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 470 rp->rtort_mbtest = 0; 471 return true; 472 } 473 return false; 474 } 475 476 /* 477 * Update all callbacks in the pipe. Suitable for synchronous grace-period 478 * primitives. 479 */ 480 static void 481 rcu_torture_pipe_update(struct rcu_torture *old_rp) 482 { 483 struct rcu_torture *rp; 484 struct rcu_torture *rp1; 485 486 if (old_rp) 487 list_add(&old_rp->rtort_free, &rcu_torture_removed); 488 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 489 if (rcu_torture_pipe_update_one(rp)) { 490 list_del(&rp->rtort_free); 491 rcu_torture_free(rp); 492 } 493 } 494 } 495 496 static void 497 rcu_torture_cb(struct rcu_head *p) 498 { 499 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 500 501 if (torture_must_stop_irq()) { 502 /* Test is ending, just drop callbacks on the floor. */ 503 /* The next initialization will pick up the pieces. 
 */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.get_gp_completed = get_completed_synchronize_rcu,
	.get_gp_completed_full = get_completed_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version.
*/ 607 .readunlock = rcu_torture_read_unlock, 608 .readlock_held = torture_readlock_not_held, 609 .get_gp_seq = rcu_no_completed, 610 .deferred_free = rcu_busted_torture_deferred_free, 611 .sync = synchronize_rcu_busted, 612 .exp_sync = synchronize_rcu_busted, 613 .call = call_rcu_busted, 614 .cb_barrier = NULL, 615 .fqs = NULL, 616 .stats = NULL, 617 .irq_capable = 1, 618 .name = "busted" 619 }; 620 621 /* 622 * Definitions for srcu torture testing. 623 */ 624 625 DEFINE_STATIC_SRCU(srcu_ctl); 626 static struct srcu_struct srcu_ctld; 627 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 628 static struct rcu_torture_ops srcud_ops; 629 630 static int srcu_torture_read_lock(void) 631 { 632 if (cur_ops == &srcud_ops) 633 return srcu_read_lock_nmisafe(srcu_ctlp); 634 else 635 return srcu_read_lock(srcu_ctlp); 636 } 637 638 static void 639 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 640 { 641 long delay; 642 const long uspertick = 1000000 / HZ; 643 const long longdelay = 10; 644 645 /* We want there to be long-running readers, but not all the time. */ 646 647 delay = torture_random(rrsp) % 648 (nrealreaders * 2 * longdelay * uspertick); 649 if (!delay && in_task()) { 650 schedule_timeout_interruptible(longdelay); 651 rtrsp->rt_delay_jiffies = longdelay; 652 } else { 653 rcu_read_delay(rrsp, rtrsp); 654 } 655 } 656 657 static void srcu_torture_read_unlock(int idx) 658 { 659 if (cur_ops == &srcud_ops) 660 srcu_read_unlock_nmisafe(srcu_ctlp, idx); 661 else 662 srcu_read_unlock(srcu_ctlp, idx); 663 } 664 665 static int torture_srcu_read_lock_held(void) 666 { 667 return srcu_read_lock_held(srcu_ctlp); 668 } 669 670 static unsigned long srcu_torture_completed(void) 671 { 672 return srcu_batches_completed(srcu_ctlp); 673 } 674 675 static void srcu_torture_deferred_free(struct rcu_torture *rp) 676 { 677 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 678 } 679 680 static void srcu_torture_synchronize(void) 681 { 682 synchronize_srcu(srcu_ctlp); 683 } 684 685 static unsigned long srcu_torture_get_gp_state(void) 686 { 687 return get_state_synchronize_srcu(srcu_ctlp); 688 } 689 690 static unsigned long srcu_torture_start_gp_poll(void) 691 { 692 return start_poll_synchronize_srcu(srcu_ctlp); 693 } 694 695 static bool srcu_torture_poll_gp_state(unsigned long oldstate) 696 { 697 return poll_state_synchronize_srcu(srcu_ctlp, oldstate); 698 } 699 700 static void srcu_torture_call(struct rcu_head *head, 701 rcu_callback_t func) 702 { 703 call_srcu(srcu_ctlp, head, func); 704 } 705 706 static void srcu_torture_barrier(void) 707 { 708 srcu_barrier(srcu_ctlp); 709 } 710 711 static void srcu_torture_stats(void) 712 { 713 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 714 } 715 716 static void srcu_torture_synchronize_expedited(void) 717 { 718 synchronize_srcu_expedited(srcu_ctlp); 719 } 720 721 static struct rcu_torture_ops srcu_ops = { 722 .ttype = SRCU_FLAVOR, 723 .init = rcu_sync_torture_init, 724 .readlock = srcu_torture_read_lock, 725 .read_delay = srcu_read_delay, 726 .readunlock = srcu_torture_read_unlock, 727 .readlock_held = torture_srcu_read_lock_held, 728 .get_gp_seq = srcu_torture_completed, 729 .deferred_free = srcu_torture_deferred_free, 730 .sync = srcu_torture_synchronize, 731 .exp_sync = srcu_torture_synchronize_expedited, 732 .get_gp_state = srcu_torture_get_gp_state, 733 .start_gp_poll = srcu_torture_start_gp_poll, 734 .poll_gp_state = srcu_torture_poll_gp_state, 735 .call = srcu_torture_call, 736 .cb_barrier = srcu_torture_barrier, 737 
.stats = srcu_torture_stats, 738 .cbflood_max = 50000, 739 .irq_capable = 1, 740 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 741 .name = "srcu" 742 }; 743 744 static void srcu_torture_init(void) 745 { 746 rcu_sync_torture_init(); 747 WARN_ON(init_srcu_struct(&srcu_ctld)); 748 srcu_ctlp = &srcu_ctld; 749 } 750 751 static void srcu_torture_cleanup(void) 752 { 753 cleanup_srcu_struct(&srcu_ctld); 754 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 755 } 756 757 /* As above, but dynamically allocated. */ 758 static struct rcu_torture_ops srcud_ops = { 759 .ttype = SRCU_FLAVOR, 760 .init = srcu_torture_init, 761 .cleanup = srcu_torture_cleanup, 762 .readlock = srcu_torture_read_lock, 763 .read_delay = srcu_read_delay, 764 .readunlock = srcu_torture_read_unlock, 765 .readlock_held = torture_srcu_read_lock_held, 766 .get_gp_seq = srcu_torture_completed, 767 .deferred_free = srcu_torture_deferred_free, 768 .sync = srcu_torture_synchronize, 769 .exp_sync = srcu_torture_synchronize_expedited, 770 .get_gp_state = srcu_torture_get_gp_state, 771 .start_gp_poll = srcu_torture_start_gp_poll, 772 .poll_gp_state = srcu_torture_poll_gp_state, 773 .call = srcu_torture_call, 774 .cb_barrier = srcu_torture_barrier, 775 .stats = srcu_torture_stats, 776 .cbflood_max = 50000, 777 .irq_capable = 1, 778 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 779 .name = "srcud" 780 }; 781 782 /* As above, but broken due to inappropriate reader extension. */ 783 static struct rcu_torture_ops busted_srcud_ops = { 784 .ttype = SRCU_FLAVOR, 785 .init = srcu_torture_init, 786 .cleanup = srcu_torture_cleanup, 787 .readlock = srcu_torture_read_lock, 788 .read_delay = rcu_read_delay, 789 .readunlock = srcu_torture_read_unlock, 790 .readlock_held = torture_srcu_read_lock_held, 791 .get_gp_seq = srcu_torture_completed, 792 .deferred_free = srcu_torture_deferred_free, 793 .sync = srcu_torture_synchronize, 794 .exp_sync = srcu_torture_synchronize_expedited, 795 .call = srcu_torture_call, 796 .cb_barrier = srcu_torture_barrier, 797 .stats = srcu_torture_stats, 798 .irq_capable = 1, 799 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 800 .extendables = RCUTORTURE_MAX_EXTEND, 801 .name = "busted_srcud" 802 }; 803 804 /* 805 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 806 * This implementation does not necessarily work well with CPU hotplug. 807 */ 808 809 static void synchronize_rcu_trivial(void) 810 { 811 int cpu; 812 813 for_each_online_cpu(cpu) { 814 torture_sched_setaffinity(current->pid, cpumask_of(cpu)); 815 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 816 } 817 } 818 819 static int rcu_torture_read_lock_trivial(void) 820 { 821 preempt_disable(); 822 return 0; 823 } 824 825 static void rcu_torture_read_unlock_trivial(int idx) 826 { 827 preempt_enable(); 828 } 829 830 static struct rcu_torture_ops trivial_ops = { 831 .ttype = RCU_TRIVIAL_FLAVOR, 832 .init = rcu_sync_torture_init, 833 .readlock = rcu_torture_read_lock_trivial, 834 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 835 .readunlock = rcu_torture_read_unlock_trivial, 836 .readlock_held = torture_readlock_not_held, 837 .get_gp_seq = rcu_no_completed, 838 .sync = synchronize_rcu_trivial, 839 .exp_sync = synchronize_rcu_trivial, 840 .fqs = NULL, 841 .stats = NULL, 842 .irq_capable = 1, 843 .name = "trivial" 844 }; 845 846 #ifdef CONFIG_TASKS_RCU 847 848 /* 849 * Definitions for RCU-tasks torture testing. 
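 *
 * Tasks RCU has no explicit read-side markers:  roughly speaking, a reader
 * is any stretch of kernel code that does not voluntarily block, so the
 * ->readlock()/->readunlock() hooks below are intentionally empty and the
 * update side instead waits for every task to pass through a voluntary
 * context switch (or another quiescent state).  Sketch of typical usage,
 * with do_something_with() as a stand-in (illustration only):
 *
 *	// Reader: no marker needed, just avoid sleeping voluntarily.
 *	do_something_with(p);
 *
 *	// Updater:
 *	synchronize_rcu_tasks();	// or call_rcu_tasks(), as below
 *	kfree(p);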
850 */ 851 852 static int tasks_torture_read_lock(void) 853 { 854 return 0; 855 } 856 857 static void tasks_torture_read_unlock(int idx) 858 { 859 } 860 861 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 862 { 863 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 864 } 865 866 static void synchronize_rcu_mult_test(void) 867 { 868 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry); 869 } 870 871 static struct rcu_torture_ops tasks_ops = { 872 .ttype = RCU_TASKS_FLAVOR, 873 .init = rcu_sync_torture_init, 874 .readlock = tasks_torture_read_lock, 875 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 876 .readunlock = tasks_torture_read_unlock, 877 .get_gp_seq = rcu_no_completed, 878 .deferred_free = rcu_tasks_torture_deferred_free, 879 .sync = synchronize_rcu_tasks, 880 .exp_sync = synchronize_rcu_mult_test, 881 .call = call_rcu_tasks, 882 .cb_barrier = rcu_barrier_tasks, 883 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 884 .fqs = NULL, 885 .stats = NULL, 886 .irq_capable = 1, 887 .slow_gps = 1, 888 .name = "tasks" 889 }; 890 891 #define TASKS_OPS &tasks_ops, 892 893 #else // #ifdef CONFIG_TASKS_RCU 894 895 #define TASKS_OPS 896 897 #endif // #else #ifdef CONFIG_TASKS_RCU 898 899 900 #ifdef CONFIG_TASKS_RUDE_RCU 901 902 /* 903 * Definitions for rude RCU-tasks torture testing. 904 */ 905 906 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 907 { 908 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 909 } 910 911 static struct rcu_torture_ops tasks_rude_ops = { 912 .ttype = RCU_TASKS_RUDE_FLAVOR, 913 .init = rcu_sync_torture_init, 914 .readlock = rcu_torture_read_lock_trivial, 915 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 916 .readunlock = rcu_torture_read_unlock_trivial, 917 .get_gp_seq = rcu_no_completed, 918 .deferred_free = rcu_tasks_rude_torture_deferred_free, 919 .sync = synchronize_rcu_tasks_rude, 920 .exp_sync = synchronize_rcu_tasks_rude, 921 .call = call_rcu_tasks_rude, 922 .cb_barrier = rcu_barrier_tasks_rude, 923 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 924 .cbflood_max = 50000, 925 .fqs = NULL, 926 .stats = NULL, 927 .irq_capable = 1, 928 .name = "tasks-rude" 929 }; 930 931 #define TASKS_RUDE_OPS &tasks_rude_ops, 932 933 #else // #ifdef CONFIG_TASKS_RUDE_RCU 934 935 #define TASKS_RUDE_OPS 936 937 #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU 938 939 940 #ifdef CONFIG_TASKS_TRACE_RCU 941 942 /* 943 * Definitions for tracing RCU-tasks torture testing. 944 */ 945 946 static int tasks_tracing_torture_read_lock(void) 947 { 948 rcu_read_lock_trace(); 949 return 0; 950 } 951 952 static void tasks_tracing_torture_read_unlock(int idx) 953 { 954 rcu_read_unlock_trace(); 955 } 956 957 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 958 { 959 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 960 } 961 962 static struct rcu_torture_ops tasks_tracing_ops = { 963 .ttype = RCU_TASKS_TRACING_FLAVOR, 964 .init = rcu_sync_torture_init, 965 .readlock = tasks_tracing_torture_read_lock, 966 .read_delay = srcu_read_delay, /* just reuse srcu's version. 
*/ 967 .readunlock = tasks_tracing_torture_read_unlock, 968 .readlock_held = rcu_read_lock_trace_held, 969 .get_gp_seq = rcu_no_completed, 970 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 971 .sync = synchronize_rcu_tasks_trace, 972 .exp_sync = synchronize_rcu_tasks_trace, 973 .call = call_rcu_tasks_trace, 974 .cb_barrier = rcu_barrier_tasks_trace, 975 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, 976 .cbflood_max = 50000, 977 .fqs = NULL, 978 .stats = NULL, 979 .irq_capable = 1, 980 .slow_gps = 1, 981 .name = "tasks-tracing" 982 }; 983 984 #define TASKS_TRACING_OPS &tasks_tracing_ops, 985 986 #else // #ifdef CONFIG_TASKS_TRACE_RCU 987 988 #define TASKS_TRACING_OPS 989 990 #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU 991 992 993 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 994 { 995 if (!cur_ops->gp_diff) 996 return new - old; 997 return cur_ops->gp_diff(new, old); 998 } 999 1000 /* 1001 * RCU torture priority-boost testing. Runs one real-time thread per 1002 * CPU for moderate bursts, repeatedly starting grace periods and waiting 1003 * for them to complete. If a given grace period takes too long, we assume 1004 * that priority inversion has occurred. 1005 */ 1006 1007 static int old_rt_runtime = -1; 1008 1009 static void rcu_torture_disable_rt_throttle(void) 1010 { 1011 /* 1012 * Disable RT throttling so that rcutorture's boost threads don't get 1013 * throttled. Only possible if rcutorture is built-in otherwise the 1014 * user should manually do this by setting the sched_rt_period_us and 1015 * sched_rt_runtime sysctls. 1016 */ 1017 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 1018 return; 1019 1020 old_rt_runtime = sysctl_sched_rt_runtime; 1021 sysctl_sched_rt_runtime = -1; 1022 } 1023 1024 static void rcu_torture_enable_rt_throttle(void) 1025 { 1026 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 1027 return; 1028 1029 sysctl_sched_rt_runtime = old_rt_runtime; 1030 old_rt_runtime = -1; 1031 } 1032 1033 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) 1034 { 1035 int cpu; 1036 static int dbg_done; 1037 unsigned long end = jiffies; 1038 bool gp_done; 1039 unsigned long j; 1040 static unsigned long last_persist; 1041 unsigned long lp; 1042 unsigned long mininterval = test_boost_duration * HZ - HZ / 2; 1043 1044 if (end - *start > mininterval) { 1045 // Recheck after checking time to avoid false positives. 1046 smp_mb(); // Time check before grace-period check. 1047 if (cur_ops->poll_gp_state(gp_state)) 1048 return false; // passed, though perhaps just barely 1049 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { 1050 // At most one persisted message per boost test. 1051 j = jiffies; 1052 lp = READ_ONCE(last_persist); 1053 if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) 1054 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); 1055 return false; // passed on a technicality 1056 } 1057 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 1058 n_rcu_torture_boost_failure++; 1059 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { 1060 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", 1061 current->rt_priority, gp_state, end - *start); 1062 cur_ops->gp_kthread_dbg(); 1063 // Recheck after print to flag grace period ending during splat. 1064 gp_done = cur_ops->poll_gp_state(gp_state); 1065 pr_info("Boost inversion: GP %lu %s.\n", gp_state, 1066 gp_done ? 
"ended already" : "still pending"); 1067 1068 } 1069 1070 return true; // failed 1071 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { 1072 *start = jiffies; 1073 } 1074 1075 return false; // passed 1076 } 1077 1078 static int rcu_torture_boost(void *arg) 1079 { 1080 unsigned long endtime; 1081 unsigned long gp_state; 1082 unsigned long gp_state_time; 1083 unsigned long oldstarttime; 1084 1085 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 1086 1087 /* Set real-time priority. */ 1088 sched_set_fifo_low(current); 1089 1090 /* Each pass through the following loop does one boost-test cycle. */ 1091 do { 1092 bool failed = false; // Test failed already in this test interval 1093 bool gp_initiated = false; 1094 1095 if (kthread_should_stop()) 1096 goto checkwait; 1097 1098 /* Wait for the next test interval. */ 1099 oldstarttime = READ_ONCE(boost_starttime); 1100 while (time_before(jiffies, oldstarttime)) { 1101 schedule_timeout_interruptible(oldstarttime - jiffies); 1102 if (stutter_wait("rcu_torture_boost")) 1103 sched_set_fifo_low(current); 1104 if (torture_must_stop()) 1105 goto checkwait; 1106 } 1107 1108 // Do one boost-test interval. 1109 endtime = oldstarttime + test_boost_duration * HZ; 1110 while (time_before(jiffies, endtime)) { 1111 // Has current GP gone too long? 1112 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1113 failed = rcu_torture_boost_failed(gp_state, &gp_state_time); 1114 // If we don't have a grace period in flight, start one. 1115 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { 1116 gp_state = cur_ops->start_gp_poll(); 1117 gp_initiated = true; 1118 gp_state_time = jiffies; 1119 } 1120 if (stutter_wait("rcu_torture_boost")) { 1121 sched_set_fifo_low(current); 1122 // If the grace period already ended, 1123 // we don't know when that happened, so 1124 // start over. 1125 if (cur_ops->poll_gp_state(gp_state)) 1126 gp_initiated = false; 1127 } 1128 if (torture_must_stop()) 1129 goto checkwait; 1130 } 1131 1132 // In case the grace period extended beyond the end of the loop. 1133 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1134 rcu_torture_boost_failed(gp_state, &gp_state_time); 1135 1136 /* 1137 * Set the start time of the next test interval. 1138 * Yes, this is vulnerable to long delays, but such 1139 * delays simply cause a false negative for the next 1140 * interval. Besides, we are running at RT priority, 1141 * so delays should be relatively rare. 1142 */ 1143 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { 1144 if (mutex_trylock(&boost_mutex)) { 1145 if (oldstarttime == boost_starttime) { 1146 WRITE_ONCE(boost_starttime, 1147 jiffies + test_boost_interval * HZ); 1148 n_rcu_torture_boosts++; 1149 } 1150 mutex_unlock(&boost_mutex); 1151 break; 1152 } 1153 schedule_timeout_uninterruptible(HZ / 20); 1154 } 1155 1156 /* Go do the stutter. */ 1157 checkwait: if (stutter_wait("rcu_torture_boost")) 1158 sched_set_fifo_low(current); 1159 } while (!torture_must_stop()); 1160 1161 /* Clean up and exit. */ 1162 while (!kthread_should_stop()) { 1163 torture_shutdown_absorb("rcu_torture_boost"); 1164 schedule_timeout_uninterruptible(HZ / 20); 1165 } 1166 torture_kthread_stopping("rcu_torture_boost"); 1167 return 0; 1168 } 1169 1170 /* 1171 * RCU torture force-quiescent-state kthread. Repeatedly induces 1172 * bursts of calls to force_quiescent_state(), increasing the probability 1173 * of occurrence of some important types of race conditions. 
1174 */ 1175 static int 1176 rcu_torture_fqs(void *arg) 1177 { 1178 unsigned long fqs_resume_time; 1179 int fqs_burst_remaining; 1180 int oldnice = task_nice(current); 1181 1182 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1183 do { 1184 fqs_resume_time = jiffies + fqs_stutter * HZ; 1185 while (time_before(jiffies, fqs_resume_time) && 1186 !kthread_should_stop()) { 1187 schedule_timeout_interruptible(HZ / 20); 1188 } 1189 fqs_burst_remaining = fqs_duration; 1190 while (fqs_burst_remaining > 0 && 1191 !kthread_should_stop()) { 1192 cur_ops->fqs(); 1193 udelay(fqs_holdoff); 1194 fqs_burst_remaining -= fqs_holdoff; 1195 } 1196 if (stutter_wait("rcu_torture_fqs")) 1197 sched_set_normal(current, oldnice); 1198 } while (!torture_must_stop()); 1199 torture_kthread_stopping("rcu_torture_fqs"); 1200 return 0; 1201 } 1202 1203 // Used by writers to randomly choose from the available grace-period primitives. 1204 static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; 1205 static int nsynctypes; 1206 1207 /* 1208 * Determine which grace-period primitives are available. 1209 */ 1210 static void rcu_torture_write_types(void) 1211 { 1212 bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full; 1213 bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp; 1214 bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll; 1215 bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync; 1216 1217 /* Initialize synctype[] array. If none set, take default. */ 1218 if (!gp_cond1 && 1219 !gp_cond_exp1 && 1220 !gp_cond_full1 && 1221 !gp_cond_exp_full1 && 1222 !gp_exp1 && 1223 !gp_poll_exp1 && 1224 !gp_poll_exp_full1 && 1225 !gp_normal1 && 1226 !gp_poll1 && 1227 !gp_poll_full1 && 1228 !gp_sync1) { 1229 gp_cond1 = true; 1230 gp_cond_exp1 = true; 1231 gp_cond_full1 = true; 1232 gp_cond_exp_full1 = true; 1233 gp_exp1 = true; 1234 gp_poll_exp1 = true; 1235 gp_poll_exp_full1 = true; 1236 gp_normal1 = true; 1237 gp_poll1 = true; 1238 gp_poll_full1 = true; 1239 gp_sync1 = true; 1240 } 1241 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { 1242 synctype[nsynctypes++] = RTWS_COND_GET; 1243 pr_info("%s: Testing conditional GPs.\n", __func__); 1244 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { 1245 pr_alert("%s: gp_cond without primitives.\n", __func__); 1246 } 1247 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { 1248 synctype[nsynctypes++] = RTWS_COND_GET_EXP; 1249 pr_info("%s: Testing conditional expedited GPs.\n", __func__); 1250 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { 1251 pr_alert("%s: gp_cond_exp without primitives.\n", __func__); 1252 } 1253 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) { 1254 synctype[nsynctypes++] = RTWS_COND_GET_FULL; 1255 pr_info("%s: Testing conditional full-state GPs.\n", __func__); 1256 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) { 1257 pr_alert("%s: gp_cond_full without primitives.\n", __func__); 1258 } 1259 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) { 1260 synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL; 1261 pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__); 1262 } else if (gp_cond_exp_full && 1263 (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) { 1264 pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__); 1265 } 1266 if (gp_exp1 
&& cur_ops->exp_sync) { 1267 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1268 pr_info("%s: Testing expedited GPs.\n", __func__); 1269 } else if (gp_exp && !cur_ops->exp_sync) { 1270 pr_alert("%s: gp_exp without primitives.\n", __func__); 1271 } 1272 if (gp_normal1 && cur_ops->deferred_free) { 1273 synctype[nsynctypes++] = RTWS_DEF_FREE; 1274 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1275 } else if (gp_normal && !cur_ops->deferred_free) { 1276 pr_alert("%s: gp_normal without primitives.\n", __func__); 1277 } 1278 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state && 1279 cur_ops->start_gp_poll && cur_ops->poll_gp_state) { 1280 synctype[nsynctypes++] = RTWS_POLL_GET; 1281 pr_info("%s: Testing polling GPs.\n", __func__); 1282 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { 1283 pr_alert("%s: gp_poll without primitives.\n", __func__); 1284 } 1285 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full 1286 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) { 1287 synctype[nsynctypes++] = RTWS_POLL_GET_FULL; 1288 pr_info("%s: Testing polling full-state GPs.\n", __func__); 1289 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) { 1290 pr_alert("%s: gp_poll_full without primitives.\n", __func__); 1291 } 1292 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { 1293 synctype[nsynctypes++] = RTWS_POLL_GET_EXP; 1294 pr_info("%s: Testing polling expedited GPs.\n", __func__); 1295 } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { 1296 pr_alert("%s: gp_poll_exp without primitives.\n", __func__); 1297 } 1298 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) { 1299 synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL; 1300 pr_info("%s: Testing polling full-state expedited GPs.\n", __func__); 1301 } else if (gp_poll_exp_full && 1302 (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) { 1303 pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__); 1304 } 1305 if (gp_sync1 && cur_ops->sync) { 1306 synctype[nsynctypes++] = RTWS_SYNC; 1307 pr_info("%s: Testing normal GPs.\n", __func__); 1308 } else if (gp_sync && !cur_ops->sync) { 1309 pr_alert("%s: gp_sync without primitives.\n", __func__); 1310 } 1311 } 1312 1313 /* 1314 * Do the specified rcu_torture_writer() synchronous grace period, 1315 * while also testing out the polled APIs. Note well that the single-CPU 1316 * grace-period optimizations must be accounted for. 
1317 */ 1318 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) 1319 { 1320 unsigned long cookie; 1321 struct rcu_gp_oldstate cookie_full; 1322 bool dopoll; 1323 bool dopoll_full; 1324 unsigned long r = torture_random(trsp); 1325 1326 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); 1327 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); 1328 if (dopoll || dopoll_full) 1329 cpus_read_lock(); 1330 if (dopoll) 1331 cookie = cur_ops->get_gp_state(); 1332 if (dopoll_full) 1333 cur_ops->get_gp_state_full(&cookie_full); 1334 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) 1335 sync(); 1336 sync(); 1337 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), 1338 "%s: Cookie check 3 failed %pS() online %*pbl.", 1339 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1340 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), 1341 "%s: Cookie check 4 failed %pS() online %*pbl", 1342 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1343 if (dopoll || dopoll_full) 1344 cpus_read_unlock(); 1345 } 1346 1347 /* 1348 * RCU torture writer kthread. Repeatedly substitutes a new structure 1349 * for that pointed to by rcu_torture_current, freeing the old structure 1350 * after a series of grace periods (the "pipeline"). 1351 */ 1352 static int 1353 rcu_torture_writer(void *arg) 1354 { 1355 bool boot_ended; 1356 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1357 unsigned long cookie; 1358 struct rcu_gp_oldstate cookie_full; 1359 int expediting = 0; 1360 unsigned long gp_snap; 1361 unsigned long gp_snap1; 1362 struct rcu_gp_oldstate gp_snap_full; 1363 struct rcu_gp_oldstate gp_snap1_full; 1364 int i; 1365 int idx; 1366 int oldnice = task_nice(current); 1367 struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE]; 1368 struct rcu_torture *rp; 1369 struct rcu_torture *old_rp; 1370 static DEFINE_TORTURE_RANDOM(rand); 1371 bool stutter_waited; 1372 unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE]; 1373 1374 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1375 if (!can_expedite) 1376 pr_alert("%s" TORTURE_FLAG 1377 " GP expediting controlled from boot/sysfs for %s.\n", 1378 torture_type, cur_ops->name); 1379 if (WARN_ONCE(nsynctypes == 0, 1380 "%s: No update-side primitives.\n", __func__)) { 1381 /* 1382 * No updates primitives, so don't try updating. 1383 * The resulting test won't be testing much, hence the 1384 * above WARN_ONCE(). 1385 */ 1386 rcu_torture_writer_state = RTWS_STOPPING; 1387 torture_kthread_stopping("rcu_torture_writer"); 1388 return 0; 1389 } 1390 1391 do { 1392 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1393 torture_hrtimeout_us(500, 1000, &rand); 1394 rp = rcu_torture_alloc(); 1395 if (rp == NULL) 1396 continue; 1397 rp->rtort_pipe_count = 0; 1398 rcu_torture_writer_state = RTWS_DELAY; 1399 udelay(torture_random(&rand) & 0x3ff); 1400 rcu_torture_writer_state = RTWS_REPLACE; 1401 old_rp = rcu_dereference_check(rcu_torture_current, 1402 current == writer_task); 1403 rp->rtort_mbtest = 1; 1404 rcu_assign_pointer(rcu_torture_current, rp); 1405 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1406 if (old_rp) { 1407 i = old_rp->rtort_pipe_count; 1408 if (i > RCU_TORTURE_PIPE_LEN) 1409 i = RCU_TORTURE_PIPE_LEN; 1410 atomic_inc(&rcu_torture_wcount[i]); 1411 WRITE_ONCE(old_rp->rtort_pipe_count, 1412 old_rp->rtort_pipe_count + 1); 1413 1414 // Make sure readers block polled grace periods. 
1415 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1416 idx = cur_ops->readlock(); 1417 cookie = cur_ops->get_gp_state(); 1418 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1419 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1420 __func__, 1421 rcu_torture_writer_state_getname(), 1422 rcu_torture_writer_state, 1423 cookie, cur_ops->get_gp_state()); 1424 if (cur_ops->get_gp_completed) { 1425 cookie = cur_ops->get_gp_completed(); 1426 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1427 } 1428 cur_ops->readunlock(idx); 1429 } 1430 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1431 idx = cur_ops->readlock(); 1432 cur_ops->get_gp_state_full(&cookie_full); 1433 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1434 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1435 __func__, 1436 rcu_torture_writer_state_getname(), 1437 rcu_torture_writer_state, 1438 cpumask_pr_args(cpu_online_mask)); 1439 if (cur_ops->get_gp_completed_full) { 1440 cur_ops->get_gp_completed_full(&cookie_full); 1441 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1442 } 1443 cur_ops->readunlock(idx); 1444 } 1445 switch (synctype[torture_random(&rand) % nsynctypes]) { 1446 case RTWS_DEF_FREE: 1447 rcu_torture_writer_state = RTWS_DEF_FREE; 1448 cur_ops->deferred_free(old_rp); 1449 break; 1450 case RTWS_EXP_SYNC: 1451 rcu_torture_writer_state = RTWS_EXP_SYNC; 1452 do_rtws_sync(&rand, cur_ops->exp_sync); 1453 rcu_torture_pipe_update(old_rp); 1454 break; 1455 case RTWS_COND_GET: 1456 rcu_torture_writer_state = RTWS_COND_GET; 1457 gp_snap = cur_ops->get_gp_state(); 1458 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1459 rcu_torture_writer_state = RTWS_COND_SYNC; 1460 cur_ops->cond_sync(gp_snap); 1461 rcu_torture_pipe_update(old_rp); 1462 break; 1463 case RTWS_COND_GET_EXP: 1464 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1465 gp_snap = cur_ops->get_gp_state_exp(); 1466 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1467 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1468 cur_ops->cond_sync_exp(gp_snap); 1469 rcu_torture_pipe_update(old_rp); 1470 break; 1471 case RTWS_COND_GET_FULL: 1472 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1473 cur_ops->get_gp_state_full(&gp_snap_full); 1474 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1475 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1476 cur_ops->cond_sync_full(&gp_snap_full); 1477 rcu_torture_pipe_update(old_rp); 1478 break; 1479 case RTWS_COND_GET_EXP_FULL: 1480 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1481 cur_ops->get_gp_state_full(&gp_snap_full); 1482 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1483 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1484 cur_ops->cond_sync_exp_full(&gp_snap_full); 1485 rcu_torture_pipe_update(old_rp); 1486 break; 1487 case RTWS_POLL_GET: 1488 rcu_torture_writer_state = RTWS_POLL_GET; 1489 for (i = 0; i < ARRAY_SIZE(ulo); i++) 1490 ulo[i] = cur_ops->get_comp_state(); 1491 gp_snap = cur_ops->start_gp_poll(); 1492 rcu_torture_writer_state = RTWS_POLL_WAIT; 1493 while (!cur_ops->poll_gp_state(gp_snap)) { 1494 gp_snap1 = cur_ops->get_gp_state(); 1495 for (i = 0; i < ARRAY_SIZE(ulo); i++) 1496 if (cur_ops->poll_gp_state(ulo[i]) || 1497 cur_ops->same_gp_state(ulo[i], gp_snap1)) { 1498 ulo[i] = gp_snap1; 1499 break; 1500 } 1501 WARN_ON_ONCE(i >= ARRAY_SIZE(ulo)); 1502 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1503 &rand); 1504 } 1505 rcu_torture_pipe_update(old_rp); 1506 break; 1507 case RTWS_POLL_GET_FULL: 1508 
rcu_torture_writer_state = RTWS_POLL_GET_FULL; 1509 for (i = 0; i < ARRAY_SIZE(rgo); i++) 1510 cur_ops->get_comp_state_full(&rgo[i]); 1511 cur_ops->start_gp_poll_full(&gp_snap_full); 1512 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1513 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1514 cur_ops->get_gp_state_full(&gp_snap1_full); 1515 for (i = 0; i < ARRAY_SIZE(rgo); i++) 1516 if (cur_ops->poll_gp_state_full(&rgo[i]) || 1517 cur_ops->same_gp_state_full(&rgo[i], 1518 &gp_snap1_full)) { 1519 rgo[i] = gp_snap1_full; 1520 break; 1521 } 1522 WARN_ON_ONCE(i >= ARRAY_SIZE(rgo)); 1523 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1524 &rand); 1525 } 1526 rcu_torture_pipe_update(old_rp); 1527 break; 1528 case RTWS_POLL_GET_EXP: 1529 rcu_torture_writer_state = RTWS_POLL_GET_EXP; 1530 gp_snap = cur_ops->start_gp_poll_exp(); 1531 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; 1532 while (!cur_ops->poll_gp_state_exp(gp_snap)) 1533 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1534 &rand); 1535 rcu_torture_pipe_update(old_rp); 1536 break; 1537 case RTWS_POLL_GET_EXP_FULL: 1538 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; 1539 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1540 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; 1541 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1542 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1543 &rand); 1544 rcu_torture_pipe_update(old_rp); 1545 break; 1546 case RTWS_SYNC: 1547 rcu_torture_writer_state = RTWS_SYNC; 1548 do_rtws_sync(&rand, cur_ops->sync); 1549 rcu_torture_pipe_update(old_rp); 1550 break; 1551 default: 1552 WARN_ON_ONCE(1); 1553 break; 1554 } 1555 } 1556 WRITE_ONCE(rcu_torture_current_version, 1557 rcu_torture_current_version + 1); 1558 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1559 if (can_expedite && 1560 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1561 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1562 if (expediting >= 0) 1563 rcu_expedite_gp(); 1564 else 1565 rcu_unexpedite_gp(); 1566 if (++expediting > 3) 1567 expediting = -expediting; 1568 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1569 can_expedite = !rcu_gp_is_expedited() && 1570 !rcu_gp_is_normal(); 1571 } 1572 rcu_torture_writer_state = RTWS_STUTTER; 1573 boot_ended = rcu_inkernel_boot_has_ended(); 1574 stutter_waited = stutter_wait("rcu_torture_writer"); 1575 if (stutter_waited && 1576 !atomic_read(&rcu_fwd_cb_nodelay) && 1577 !cur_ops->slow_gps && 1578 !torture_must_stop() && 1579 boot_ended) 1580 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1581 if (list_empty(&rcu_tortures[i].rtort_free) && 1582 rcu_access_pointer(rcu_torture_current) != 1583 &rcu_tortures[i]) { 1584 tracing_off(); 1585 show_rcu_gp_kthreads(); 1586 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1587 rcu_ftrace_dump(DUMP_ALL); 1588 } 1589 if (stutter_waited) 1590 sched_set_normal(current, oldnice); 1591 } while (!torture_must_stop()); 1592 rcu_torture_current = NULL; // Let stats task know that we are done. 1593 /* Reset expediting back to unexpedited. 
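 * A positive value of "expediting" counts rcu_expedite_gp() calls that
 * have not yet been matched by rcu_unexpedite_gp(), so negating it lets
 * the loop below emit exactly that many rcu_unexpedite_gp() calls.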
*/ 1594 if (expediting > 0) 1595 expediting = -expediting; 1596 while (can_expedite && expediting++ < 0) 1597 rcu_unexpedite_gp(); 1598 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1599 if (!can_expedite) 1600 pr_alert("%s" TORTURE_FLAG 1601 " Dynamic grace-period expediting was disabled.\n", 1602 torture_type); 1603 rcu_torture_writer_state = RTWS_STOPPING; 1604 torture_kthread_stopping("rcu_torture_writer"); 1605 return 0; 1606 } 1607 1608 /* 1609 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1610 * delay between calls. 1611 */ 1612 static int 1613 rcu_torture_fakewriter(void *arg) 1614 { 1615 unsigned long gp_snap; 1616 struct rcu_gp_oldstate gp_snap_full; 1617 DEFINE_TORTURE_RANDOM(rand); 1618 1619 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1620 set_user_nice(current, MAX_NICE); 1621 1622 if (WARN_ONCE(nsynctypes == 0, 1623 "%s: No update-side primitives.\n", __func__)) { 1624 /* 1625 * No updates primitives, so don't try updating. 1626 * The resulting test won't be testing much, hence the 1627 * above WARN_ONCE(). 1628 */ 1629 torture_kthread_stopping("rcu_torture_fakewriter"); 1630 return 0; 1631 } 1632 1633 do { 1634 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1635 if (cur_ops->cb_barrier != NULL && 1636 torture_random(&rand) % (nfakewriters * 8) == 0) { 1637 cur_ops->cb_barrier(); 1638 } else { 1639 switch (synctype[torture_random(&rand) % nsynctypes]) { 1640 case RTWS_DEF_FREE: 1641 break; 1642 case RTWS_EXP_SYNC: 1643 cur_ops->exp_sync(); 1644 break; 1645 case RTWS_COND_GET: 1646 gp_snap = cur_ops->get_gp_state(); 1647 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1648 cur_ops->cond_sync(gp_snap); 1649 break; 1650 case RTWS_COND_GET_EXP: 1651 gp_snap = cur_ops->get_gp_state_exp(); 1652 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1653 cur_ops->cond_sync_exp(gp_snap); 1654 break; 1655 case RTWS_COND_GET_FULL: 1656 cur_ops->get_gp_state_full(&gp_snap_full); 1657 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1658 cur_ops->cond_sync_full(&gp_snap_full); 1659 break; 1660 case RTWS_COND_GET_EXP_FULL: 1661 cur_ops->get_gp_state_full(&gp_snap_full); 1662 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1663 cur_ops->cond_sync_exp_full(&gp_snap_full); 1664 break; 1665 case RTWS_POLL_GET: 1666 gp_snap = cur_ops->start_gp_poll(); 1667 while (!cur_ops->poll_gp_state(gp_snap)) { 1668 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1669 &rand); 1670 } 1671 break; 1672 case RTWS_POLL_GET_FULL: 1673 cur_ops->start_gp_poll_full(&gp_snap_full); 1674 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1675 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1676 &rand); 1677 } 1678 break; 1679 case RTWS_POLL_GET_EXP: 1680 gp_snap = cur_ops->start_gp_poll_exp(); 1681 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1682 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1683 &rand); 1684 } 1685 break; 1686 case RTWS_POLL_GET_EXP_FULL: 1687 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1688 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1689 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1690 &rand); 1691 } 1692 break; 1693 case RTWS_SYNC: 1694 cur_ops->sync(); 1695 break; 1696 default: 1697 WARN_ON_ONCE(1); 1698 break; 1699 } 1700 } 1701 stutter_wait("rcu_torture_fakewriter"); 1702 } while (!torture_must_stop()); 1703 1704 torture_kthread_stopping("rcu_torture_fakewriter"); 1705 return 0; 1706 } 1707 1708 static void 
rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was actually
 * a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew1 = -1;
	int idxnew2 = -1;
	int idxold1 = *readstate;
	int idxold2 = idxold1;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold2 < 0);
	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU_1)
		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
	if (statesnew & RCUTORTURE_RDR_RCU_2)
		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;

	/*
	 * Next, remove old protection, in decreasing order of strength
	 * to avoid unlock paths that aren't safe in the stronger
	 * context.  Namely: BH can not be enabled with disabled interrupts.
	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
	 * context.
	 */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_RCU_2) {
		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
		WARN_ON_ONCE(idxnew2 != -1);
		idxold2 = 0;
	}
	if (statesold & RCUTORTURE_RDR_RCU_1) {
		bool lockit;

		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
		WARN_ON_ONCE(idxnew1 != -1);
		idxold1 = 0;
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew1 == -1)
		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
	WARN_ON_ONCE(idxnew1 < 0);
	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
	if (idxnew2 == -1)
		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
	WARN_ON_ONCE(idxnew2 < 0);
	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	*readstate = idxnew1 | idxnew2 | newstate;
	WARN_ON_ONCE(*readstate < 0);
	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
}

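/*
 * Note on the ->readstate encoding used above: the low-order bits are the
 * RCUTORTURE_RDR_* protection flags, while the bits at RCUTORTURE_RDR_SHIFT_1
 * and RCUTORTURE_RDR_SHIFT_2 hold the index returned by ->readlock() for the
 * outer and nested flavor-specific readers, so that the matching
 * ->readunlock() calls can be handed back the correct index.
 */
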
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp);
	unsigned long randmask2 = randmask1 >> 3;
	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));

	// Can't have nested RCU reader without outer RCU reader.
	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
		if (oldmask & RCUTORTURE_RDR_RCU_1)
			mask &= ~RCUTORTURE_RDR_RCU_2;
		else
			mask |= RCUTORTURE_RDR_RCU_1;
	}

	/*
	 * Can't enable bh w/irq disabled.
	 */
	if (mask & RCUTORTURE_RDR_IRQ)
		mask |= oldmask & bhs;

	/*
	 * Ideally these sequences would be detected in debug builds
	 * (regardless of RT), but until then don't stop testing
	 * them on non-RT.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* Can't modify BH in atomic context */
		if (oldmask & preempts_irq)
			mask &= ~bhs;
		if ((oldmask | mask) & preempts_irq)
			mask |= oldmask & bhs;
	}

	return mask ?: RCUTORTURE_RDR_RCU_1;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp; /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = torture_random(trsp);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
	bool checkpolling = !(torture_random(trsp) & 0xfff);
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	WARN_ON_ONCE(!rcu_is_watching());
	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	if (checkpolling) {
		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
			cookie = cur_ops->get_gp_state();
		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
			cur_ops->get_gp_state_full(&cookie_full);
	}
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  !cur_ops->readlock_held || cur_ops->readlock_held());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rcu_torture_reader_do_mbchk(myid, p, trsp);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	if (checkpolling) {
		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
			WARN_ONCE(cur_ops->poll_gp_state(cookie),
				  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
				  __func__,
				  rcu_torture_writer_state_getname(),
				  rcu_torture_writer_state,
				  cookie, cur_ops->get_gp_state());
		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
			WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
				  "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
				  __func__,
				  rcu_torture_writer_state_getname(),
				  rcu_torture_writer_state,
				  cpumask_pr_args(cpu_online_mask));
	}
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate);
	// This next splat is expected behavior if leakpointer, especially
	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);

	/* If error or close call, record the sequence of reader protections.
*/ 2036 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2037 i = 0; 2038 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 2039 err_segs[i++] = *rtrsp1; 2040 rt_read_nsegs = i; 2041 } 2042 2043 return true; 2044 } 2045 2046 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2047 2048 /* 2049 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2050 * incrementing the corresponding element of the pipeline array. The 2051 * counter in the element should never be greater than 1, otherwise, the 2052 * RCU implementation is broken. 2053 */ 2054 static void rcu_torture_timer(struct timer_list *unused) 2055 { 2056 atomic_long_inc(&n_rcu_torture_timers); 2057 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2058 2059 /* Test call_rcu() invocation from interrupt handler. */ 2060 if (cur_ops->call) { 2061 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2062 2063 if (rhp) 2064 cur_ops->call(rhp, rcu_torture_timer_cb); 2065 } 2066 } 2067 2068 /* 2069 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2070 * incrementing the corresponding element of the pipeline array. The 2071 * counter in the element should never be greater than 1, otherwise, the 2072 * RCU implementation is broken. 2073 */ 2074 static int 2075 rcu_torture_reader(void *arg) 2076 { 2077 unsigned long lastsleep = jiffies; 2078 long myid = (long)arg; 2079 int mynumonline = myid; 2080 DEFINE_TORTURE_RANDOM(rand); 2081 struct timer_list t; 2082 2083 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2084 set_user_nice(current, MAX_NICE); 2085 if (irqreader && cur_ops->irq_capable) 2086 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2087 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2088 do { 2089 if (irqreader && cur_ops->irq_capable) { 2090 if (!timer_pending(&t)) 2091 mod_timer(&t, jiffies + 1); 2092 } 2093 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2094 schedule_timeout_interruptible(HZ); 2095 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2096 torture_hrtimeout_us(500, 1000, &rand); 2097 lastsleep = jiffies + 10; 2098 } 2099 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2100 schedule_timeout_interruptible(HZ / 5); 2101 stutter_wait("rcu_torture_reader"); 2102 } while (!torture_must_stop()); 2103 if (irqreader && cur_ops->irq_capable) { 2104 del_timer_sync(&t); 2105 destroy_timer_on_stack(&t); 2106 } 2107 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2108 torture_kthread_stopping("rcu_torture_reader"); 2109 return 0; 2110 } 2111 2112 /* 2113 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2114 * increase race probabilities and fuzzes the interval between toggling. 
2115 */ 2116 static int rcu_nocb_toggle(void *arg) 2117 { 2118 int cpu; 2119 int maxcpu = -1; 2120 int oldnice = task_nice(current); 2121 long r; 2122 DEFINE_TORTURE_RANDOM(rand); 2123 ktime_t toggle_delay; 2124 unsigned long toggle_fuzz; 2125 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2126 2127 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2128 while (!rcu_inkernel_boot_has_ended()) 2129 schedule_timeout_interruptible(HZ / 10); 2130 for_each_possible_cpu(cpu) 2131 maxcpu = cpu; 2132 WARN_ON(maxcpu < 0); 2133 if (toggle_interval > ULONG_MAX) 2134 toggle_fuzz = ULONG_MAX >> 3; 2135 else 2136 toggle_fuzz = toggle_interval >> 3; 2137 if (toggle_fuzz <= 0) 2138 toggle_fuzz = NSEC_PER_USEC; 2139 do { 2140 r = torture_random(&rand); 2141 cpu = (r >> 1) % (maxcpu + 1); 2142 if (r & 0x1) { 2143 rcu_nocb_cpu_offload(cpu); 2144 atomic_long_inc(&n_nocb_offload); 2145 } else { 2146 rcu_nocb_cpu_deoffload(cpu); 2147 atomic_long_inc(&n_nocb_deoffload); 2148 } 2149 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2150 set_current_state(TASK_INTERRUPTIBLE); 2151 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2152 if (stutter_wait("rcu_nocb_toggle")) 2153 sched_set_normal(current, oldnice); 2154 } while (!torture_must_stop()); 2155 torture_kthread_stopping("rcu_nocb_toggle"); 2156 return 0; 2157 } 2158 2159 /* 2160 * Print torture statistics. Caller must ensure that there is only 2161 * one call to this function at a given time!!! This is normally 2162 * accomplished by relying on the module system to only have one copy 2163 * of the module loaded, and then by giving the rcu_torture_stats 2164 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2165 * thread is not running). 2166 */ 2167 static void 2168 rcu_torture_stats_print(void) 2169 { 2170 int cpu; 2171 int i; 2172 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2173 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2174 struct rcu_torture *rtcp; 2175 static unsigned long rtcv_snap = ULONG_MAX; 2176 static bool splatted; 2177 struct task_struct *wtp; 2178 2179 for_each_possible_cpu(cpu) { 2180 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2181 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2182 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2183 } 2184 } 2185 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2186 if (pipesummary[i] != 0) 2187 break; 2188 } 2189 2190 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2191 rtcp = rcu_access_pointer(rcu_torture_current); 2192 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2193 rtcp, 2194 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2195 rcu_torture_current_version, 2196 list_empty(&rcu_torture_freelist), 2197 atomic_read(&n_rcu_torture_alloc), 2198 atomic_read(&n_rcu_torture_alloc_fail), 2199 atomic_read(&n_rcu_torture_free)); 2200 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2201 atomic_read(&n_rcu_torture_mberror), 2202 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2203 n_rcu_torture_barrier_error, 2204 n_rcu_torture_boost_ktrerror); 2205 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2206 n_rcu_torture_boost_failure, 2207 n_rcu_torture_boosts, 2208 atomic_long_read(&n_rcu_torture_timers)); 2209 torture_onoff_stats(); 2210 pr_cont("barrier: %ld/%ld:%ld ", 2211 data_race(n_barrier_successes), 2212 data_race(n_barrier_attempts), 2213 data_race(n_rcu_torture_barrier_error)); 2214 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2215 pr_cont("nocb-toggles: %ld:%ld\n", 2216 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2217 2218 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2219 if (atomic_read(&n_rcu_torture_mberror) || 2220 atomic_read(&n_rcu_torture_mbchk_fail) || 2221 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2222 n_rcu_torture_boost_failure || i > 1) { 2223 pr_cont("%s", "!!! "); 2224 atomic_inc(&n_rcu_torture_error); 2225 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2226 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2227 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2228 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2229 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2230 WARN_ON_ONCE(i > 1); // Too-short grace period 2231 } 2232 pr_cont("Reader Pipe: "); 2233 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2234 pr_cont(" %ld", pipesummary[i]); 2235 pr_cont("\n"); 2236 2237 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2238 pr_cont("Reader Batch: "); 2239 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2240 pr_cont(" %ld", batchsummary[i]); 2241 pr_cont("\n"); 2242 2243 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2244 pr_cont("Free-Block Circulation: "); 2245 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2246 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2247 } 2248 pr_cont("\n"); 2249 2250 if (cur_ops->stats) 2251 cur_ops->stats(); 2252 if (rtcv_snap == rcu_torture_current_version && 2253 rcu_access_pointer(rcu_torture_current) && 2254 !rcu_stall_is_suppressed()) { 2255 int __maybe_unused flags = 0; 2256 unsigned long __maybe_unused gp_seq = 0; 2257 2258 rcutorture_get_gp_data(cur_ops->ttype, 2259 &flags, &gp_seq); 2260 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 2261 &flags, &gp_seq); 2262 wtp = READ_ONCE(writer_task); 2263 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2264 rcu_torture_writer_state_getname(), 2265 rcu_torture_writer_state, gp_seq, flags, 2266 wtp == NULL ? ~0U : wtp->__state, 2267 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2268 if (!splatted && wtp) { 2269 sched_show_task(wtp); 2270 splatted = true; 2271 } 2272 if (cur_ops->gp_kthread_dbg) 2273 cur_ops->gp_kthread_dbg(); 2274 rcu_ftrace_dump(DUMP_ALL); 2275 } 2276 rtcv_snap = rcu_torture_current_version; 2277 } 2278 2279 /* 2280 * Periodically prints torture statistics, if periodic statistics printing 2281 * was specified via the stat_interval module parameter. 
2282 */ 2283 static int 2284 rcu_torture_stats(void *arg) 2285 { 2286 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2287 do { 2288 schedule_timeout_interruptible(stat_interval * HZ); 2289 rcu_torture_stats_print(); 2290 torture_shutdown_absorb("rcu_torture_stats"); 2291 } while (!torture_must_stop()); 2292 torture_kthread_stopping("rcu_torture_stats"); 2293 return 0; 2294 } 2295 2296 /* Test mem_dump_obj() and friends. */ 2297 static void rcu_torture_mem_dump_obj(void) 2298 { 2299 struct rcu_head *rhp; 2300 struct kmem_cache *kcp; 2301 static int z; 2302 2303 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2304 if (WARN_ON_ONCE(!kcp)) 2305 return; 2306 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2307 if (WARN_ON_ONCE(!rhp)) { 2308 kmem_cache_destroy(kcp); 2309 return; 2310 } 2311 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2312 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2313 mem_dump_obj(ZERO_SIZE_PTR); 2314 pr_alert("mem_dump_obj(NULL):"); 2315 mem_dump_obj(NULL); 2316 pr_alert("mem_dump_obj(%px):", &rhp); 2317 mem_dump_obj(&rhp); 2318 pr_alert("mem_dump_obj(%px):", rhp); 2319 mem_dump_obj(rhp); 2320 pr_alert("mem_dump_obj(%px):", &rhp->func); 2321 mem_dump_obj(&rhp->func); 2322 pr_alert("mem_dump_obj(%px):", &z); 2323 mem_dump_obj(&z); 2324 kmem_cache_free(kcp, rhp); 2325 kmem_cache_destroy(kcp); 2326 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2327 if (WARN_ON_ONCE(!rhp)) 2328 return; 2329 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2330 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2331 mem_dump_obj(rhp); 2332 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2333 mem_dump_obj(&rhp->func); 2334 kfree(rhp); 2335 rhp = vmalloc(4096); 2336 if (WARN_ON_ONCE(!rhp)) 2337 return; 2338 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2339 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2340 mem_dump_obj(rhp); 2341 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2342 mem_dump_obj(&rhp->func); 2343 vfree(rhp); 2344 } 2345 2346 static void 2347 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2348 { 2349 pr_alert("%s" TORTURE_FLAG 2350 "--- %s: nreaders=%d nfakewriters=%d " 2351 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2352 "shuffle_interval=%d stutter=%d irqreader=%d " 2353 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2354 "test_boost=%d/%d test_boost_interval=%d " 2355 "test_boost_duration=%d shutdown_secs=%d " 2356 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2357 "stall_cpu_block=%d " 2358 "n_barrier_cbs=%d " 2359 "onoff_interval=%d onoff_holdoff=%d " 2360 "read_exit_delay=%d read_exit_burst=%d " 2361 "nocbs_nthreads=%d nocbs_toggle=%d " 2362 "test_nmis=%d\n", 2363 torture_type, tag, nrealreaders, nfakewriters, 2364 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2365 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2366 test_boost, cur_ops->can_boost, 2367 test_boost_interval, test_boost_duration, shutdown_secs, 2368 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2369 stall_cpu_block, 2370 n_barrier_cbs, 2371 onoff_interval, onoff_holdoff, 2372 read_exit_delay, read_exit_burst, 2373 nocbs_nthreads, nocbs_toggle, 2374 test_nmis); 2375 } 2376 2377 static int rcutorture_booster_cleanup(unsigned int cpu) 2378 { 2379 struct task_struct *t; 2380 2381 if (boost_tasks[cpu] == NULL) 
2382 return 0; 2383 mutex_lock(&boost_mutex); 2384 t = boost_tasks[cpu]; 2385 boost_tasks[cpu] = NULL; 2386 rcu_torture_enable_rt_throttle(); 2387 mutex_unlock(&boost_mutex); 2388 2389 /* This must be outside of the mutex, otherwise deadlock! */ 2390 torture_stop_kthread(rcu_torture_boost, t); 2391 return 0; 2392 } 2393 2394 static int rcutorture_booster_init(unsigned int cpu) 2395 { 2396 int retval; 2397 2398 if (boost_tasks[cpu] != NULL) 2399 return 0; /* Already created, nothing more to do. */ 2400 2401 // Testing RCU priority boosting requires rcutorture do 2402 // some serious abuse. Counter this by running ksoftirqd 2403 // at higher priority. 2404 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2405 struct sched_param sp; 2406 struct task_struct *t; 2407 2408 t = per_cpu(ksoftirqd, cpu); 2409 WARN_ON_ONCE(!t); 2410 sp.sched_priority = 2; 2411 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2412 } 2413 2414 /* Don't allow time recalculation while creating a new task. */ 2415 mutex_lock(&boost_mutex); 2416 rcu_torture_disable_rt_throttle(); 2417 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2418 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2419 cpu, "rcu_torture_boost_%u"); 2420 if (IS_ERR(boost_tasks[cpu])) { 2421 retval = PTR_ERR(boost_tasks[cpu]); 2422 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2423 n_rcu_torture_boost_ktrerror++; 2424 boost_tasks[cpu] = NULL; 2425 mutex_unlock(&boost_mutex); 2426 return retval; 2427 } 2428 mutex_unlock(&boost_mutex); 2429 return 0; 2430 } 2431 2432 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 2433 { 2434 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 2435 return NOTIFY_OK; 2436 } 2437 2438 static struct notifier_block rcu_torture_stall_block = { 2439 .notifier_call = rcu_torture_stall_nf, 2440 }; 2441 2442 /* 2443 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2444 * induces a CPU stall for the time specified by stall_cpu. 2445 */ 2446 static int rcu_torture_stall(void *args) 2447 { 2448 int idx; 2449 int ret; 2450 unsigned long stop_at; 2451 2452 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2453 if (rcu_cpu_stall_notifiers) { 2454 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 2455 if (ret) 2456 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 2457 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 2458 } 2459 if (stall_cpu_holdoff > 0) { 2460 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2461 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2462 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2463 } 2464 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2465 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2466 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2467 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2468 if (kthread_should_stop()) 2469 break; 2470 schedule_timeout_uninterruptible(HZ); 2471 } 2472 } 2473 if (!kthread_should_stop() && stall_cpu > 0) { 2474 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2475 stop_at = ktime_get_seconds() + stall_cpu; 2476 /* RCU CPU stall is expected behavior in following code. 
*/ 2477 idx = cur_ops->readlock(); 2478 if (stall_cpu_irqsoff) 2479 local_irq_disable(); 2480 else if (!stall_cpu_block) 2481 preempt_disable(); 2482 pr_alert("%s start on CPU %d.\n", 2483 __func__, raw_smp_processor_id()); 2484 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 2485 stop_at)) 2486 if (stall_cpu_block) { 2487 #ifdef CONFIG_PREEMPTION 2488 preempt_schedule(); 2489 #else 2490 schedule_timeout_uninterruptible(HZ); 2491 #endif 2492 } else if (stall_no_softlockup) { 2493 touch_softlockup_watchdog(); 2494 } 2495 if (stall_cpu_irqsoff) 2496 local_irq_enable(); 2497 else if (!stall_cpu_block) 2498 preempt_enable(); 2499 cur_ops->readunlock(idx); 2500 } 2501 pr_alert("%s end.\n", __func__); 2502 if (rcu_cpu_stall_notifiers && !ret) { 2503 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 2504 if (ret) 2505 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 2506 } 2507 torture_shutdown_absorb("rcu_torture_stall"); 2508 while (!kthread_should_stop()) 2509 schedule_timeout_interruptible(10 * HZ); 2510 return 0; 2511 } 2512 2513 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2514 static int __init rcu_torture_stall_init(void) 2515 { 2516 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2517 return 0; 2518 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2519 } 2520 2521 /* State structure for forward-progress self-propagating RCU callback. */ 2522 struct fwd_cb_state { 2523 struct rcu_head rh; 2524 int stop; 2525 }; 2526 2527 /* 2528 * Forward-progress self-propagating RCU callback function. Because 2529 * callbacks run from softirq, this function is an implicit RCU read-side 2530 * critical section. 2531 */ 2532 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2533 { 2534 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2535 2536 if (READ_ONCE(fcsp->stop)) { 2537 WRITE_ONCE(fcsp->stop, 2); 2538 return; 2539 } 2540 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2541 } 2542 2543 /* State for continuous-flood RCU callbacks. */ 2544 struct rcu_fwd_cb { 2545 struct rcu_head rh; 2546 struct rcu_fwd_cb *rfc_next; 2547 struct rcu_fwd *rfc_rfp; 2548 int rfc_gps; 2549 }; 2550 2551 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2552 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2553 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2554 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 2555 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2556 2557 struct rcu_launder_hist { 2558 long n_launders; 2559 unsigned long launder_gp_seq; 2560 }; 2561 2562 struct rcu_fwd { 2563 spinlock_t rcu_fwd_lock; 2564 struct rcu_fwd_cb *rcu_fwd_cb_head; 2565 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2566 long n_launders_cb; 2567 unsigned long rcu_fwd_startat; 2568 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2569 unsigned long rcu_launder_gp_seq_start; 2570 int rcu_fwd_id; 2571 }; 2572 2573 static DEFINE_MUTEX(rcu_fwd_mutex); 2574 static struct rcu_fwd *rcu_fwds; 2575 static unsigned long rcu_fwd_seq; 2576 static atomic_long_t rcu_fwd_max_cbs; 2577 static bool rcu_fwd_emergency_stop; 2578 2579 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2580 { 2581 unsigned long gps; 2582 unsigned long gps_old; 2583 int i; 2584 int j; 2585 2586 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2587 if (rfp->n_launders_hist[i].n_launders > 0) 2588 break; 2589 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2590 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2591 gps_old = rfp->rcu_launder_gp_seq_start; 2592 for (j = 0; j <= i; j++) { 2593 gps = rfp->n_launders_hist[j].launder_gp_seq; 2594 pr_cont(" %ds/%d: %ld:%ld", 2595 j + 1, FWD_CBS_HIST_DIV, 2596 rfp->n_launders_hist[j].n_launders, 2597 rcutorture_seq_diff(gps, gps_old)); 2598 gps_old = gps; 2599 } 2600 pr_cont("\n"); 2601 } 2602 2603 /* Callback function for continuous-flood RCU callbacks. */ 2604 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2605 { 2606 unsigned long flags; 2607 int i; 2608 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2609 struct rcu_fwd_cb **rfcpp; 2610 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2611 2612 rfcp->rfc_next = NULL; 2613 rfcp->rfc_gps++; 2614 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2615 rfcpp = rfp->rcu_fwd_cb_tail; 2616 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2617 WRITE_ONCE(*rfcpp, rfcp); 2618 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2619 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2620 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2621 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2622 rfp->n_launders_hist[i].n_launders++; 2623 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2624 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2625 } 2626 2627 // Give the scheduler a chance, even on nohz_full CPUs. 2628 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2629 { 2630 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2631 // Real call_rcu() floods hit userspace, so emulate that. 2632 if (need_resched() || (iter & 0xfff)) 2633 schedule(); 2634 return; 2635 } 2636 // No userspace emulation: CB invocation throttles call_rcu() 2637 cond_resched(); 2638 } 2639 2640 /* 2641 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2642 * test is over or because we hit an OOM event. 
2643 */ 2644 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2645 { 2646 unsigned long flags; 2647 unsigned long freed = 0; 2648 struct rcu_fwd_cb *rfcp; 2649 2650 for (;;) { 2651 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2652 rfcp = rfp->rcu_fwd_cb_head; 2653 if (!rfcp) { 2654 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2655 break; 2656 } 2657 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2658 if (!rfp->rcu_fwd_cb_head) 2659 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2660 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2661 kfree(rfcp); 2662 freed++; 2663 rcu_torture_fwd_prog_cond_resched(freed); 2664 if (tick_nohz_full_enabled()) { 2665 local_irq_save(flags); 2666 rcu_momentary_dyntick_idle(); 2667 local_irq_restore(flags); 2668 } 2669 } 2670 return freed; 2671 } 2672 2673 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2674 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2675 int *tested, int *tested_tries) 2676 { 2677 unsigned long cver; 2678 unsigned long dur; 2679 struct fwd_cb_state fcs; 2680 unsigned long gps; 2681 int idx; 2682 int sd; 2683 int sd4; 2684 bool selfpropcb = false; 2685 unsigned long stopat; 2686 static DEFINE_TORTURE_RANDOM(trs); 2687 2688 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2689 if (!cur_ops->sync) 2690 return; // Cannot do need_resched() forward progress testing without ->sync. 2691 if (cur_ops->call && cur_ops->cb_barrier) { 2692 init_rcu_head_on_stack(&fcs.rh); 2693 selfpropcb = true; 2694 } 2695 2696 /* Tight loop containing cond_resched(). */ 2697 atomic_inc(&rcu_fwd_cb_nodelay); 2698 cur_ops->sync(); /* Later readers see above write. */ 2699 if (selfpropcb) { 2700 WRITE_ONCE(fcs.stop, 0); 2701 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2702 } 2703 cver = READ_ONCE(rcu_torture_current_version); 2704 gps = cur_ops->get_gp_seq(); 2705 sd = cur_ops->stall_dur() + 1; 2706 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2707 dur = sd4 + torture_random(&trs) % (sd - sd4); 2708 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2709 stopat = rfp->rcu_fwd_startat + dur; 2710 while (time_before(jiffies, stopat) && 2711 !shutdown_time_arrived() && 2712 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2713 idx = cur_ops->readlock(); 2714 udelay(10); 2715 cur_ops->readunlock(idx); 2716 if (!fwd_progress_need_resched || need_resched()) 2717 cond_resched(); 2718 } 2719 (*tested_tries)++; 2720 if (!time_before(jiffies, stopat) && 2721 !shutdown_time_arrived() && 2722 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2723 (*tested)++; 2724 cver = READ_ONCE(rcu_torture_current_version) - cver; 2725 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2726 WARN_ON(!cver && gps < 2); 2727 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2728 rfp->rcu_fwd_id, dur, cver, gps); 2729 } 2730 if (selfpropcb) { 2731 WRITE_ONCE(fcs.stop, 1); 2732 cur_ops->sync(); /* Wait for running CB to complete. */ 2733 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2734 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2735 } 2736 2737 if (selfpropcb) { 2738 WARN_ON(READ_ONCE(fcs.stop) != 2); 2739 destroy_rcu_head_on_stack(&fcs.rh); 2740 } 2741 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2742 atomic_dec(&rcu_fwd_cb_nodelay); 2743 } 2744 2745 /* Carry out call_rcu() forward-progress testing. 
*/ 2746 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2747 { 2748 unsigned long cver; 2749 unsigned long flags; 2750 unsigned long gps; 2751 int i; 2752 long n_launders; 2753 long n_launders_cb_snap; 2754 long n_launders_sa; 2755 long n_max_cbs; 2756 long n_max_gps; 2757 struct rcu_fwd_cb *rfcp; 2758 struct rcu_fwd_cb *rfcpn; 2759 unsigned long stopat; 2760 unsigned long stoppedat; 2761 2762 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2763 if (READ_ONCE(rcu_fwd_emergency_stop)) 2764 return; /* Get out of the way quickly, no GP wait! */ 2765 if (!cur_ops->call) 2766 return; /* Can't do call_rcu() fwd prog without ->call. */ 2767 2768 /* Loop continuously posting RCU callbacks. */ 2769 atomic_inc(&rcu_fwd_cb_nodelay); 2770 cur_ops->sync(); /* Later readers see above write. */ 2771 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2772 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2773 n_launders = 0; 2774 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2775 n_launders_sa = 0; 2776 n_max_cbs = 0; 2777 n_max_gps = 0; 2778 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2779 rfp->n_launders_hist[i].n_launders = 0; 2780 cver = READ_ONCE(rcu_torture_current_version); 2781 gps = cur_ops->get_gp_seq(); 2782 rfp->rcu_launder_gp_seq_start = gps; 2783 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2784 while (time_before(jiffies, stopat) && 2785 !shutdown_time_arrived() && 2786 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2787 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2788 rfcpn = NULL; 2789 if (rfcp) 2790 rfcpn = READ_ONCE(rfcp->rfc_next); 2791 if (rfcpn) { 2792 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2793 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2794 break; 2795 rfp->rcu_fwd_cb_head = rfcpn; 2796 n_launders++; 2797 n_launders_sa++; 2798 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2799 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2800 if (WARN_ON_ONCE(!rfcp)) { 2801 schedule_timeout_interruptible(1); 2802 continue; 2803 } 2804 n_max_cbs++; 2805 n_launders_sa = 0; 2806 rfcp->rfc_gps = 0; 2807 rfcp->rfc_rfp = rfp; 2808 } else { 2809 rfcp = NULL; 2810 } 2811 if (rfcp) 2812 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2813 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2814 if (tick_nohz_full_enabled()) { 2815 local_irq_save(flags); 2816 rcu_momentary_dyntick_idle(); 2817 local_irq_restore(flags); 2818 } 2819 } 2820 stoppedat = jiffies; 2821 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2822 cver = READ_ONCE(rcu_torture_current_version) - cver; 2823 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2824 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2825 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2826 (void)rcu_torture_fwd_prog_cbfree(rfp); 2827 2828 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2829 !shutdown_time_arrived()) { 2830 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2831 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2832 __func__, 2833 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2834 n_launders + n_max_cbs - n_launders_cb_snap, 2835 n_launders, n_launders_sa, 2836 n_max_gps, n_max_cbs, cver, gps); 2837 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2838 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 
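		/*
		 * rcutorture_oom_notify() also invokes rcu_torture_fwd_cb_hist()
		 * under rcu_fwd_mutex, so holding it here keeps the histogram
		 * output from interleaving with OOM-time diagnostics.
		 */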
2839 rcu_torture_fwd_cb_hist(rfp); 2840 mutex_unlock(&rcu_fwd_mutex); 2841 } 2842 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2843 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2844 atomic_dec(&rcu_fwd_cb_nodelay); 2845 } 2846 2847 2848 /* 2849 * OOM notifier, but this only prints diagnostic information for the 2850 * current forward-progress test. 2851 */ 2852 static int rcutorture_oom_notify(struct notifier_block *self, 2853 unsigned long notused, void *nfreed) 2854 { 2855 int i; 2856 long ncbs; 2857 struct rcu_fwd *rfp; 2858 2859 mutex_lock(&rcu_fwd_mutex); 2860 rfp = rcu_fwds; 2861 if (!rfp) { 2862 mutex_unlock(&rcu_fwd_mutex); 2863 return NOTIFY_OK; 2864 } 2865 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2866 __func__); 2867 for (i = 0; i < fwd_progress; i++) { 2868 rcu_torture_fwd_cb_hist(&rfp[i]); 2869 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2870 } 2871 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2872 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2873 ncbs = 0; 2874 for (i = 0; i < fwd_progress; i++) 2875 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2876 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2877 cur_ops->cb_barrier(); 2878 ncbs = 0; 2879 for (i = 0; i < fwd_progress; i++) 2880 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2881 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2882 cur_ops->cb_barrier(); 2883 ncbs = 0; 2884 for (i = 0; i < fwd_progress; i++) 2885 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2886 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2887 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2888 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2889 pr_info("%s returning after OOM processing.\n", __func__); 2890 mutex_unlock(&rcu_fwd_mutex); 2891 return NOTIFY_OK; 2892 } 2893 2894 static struct notifier_block rcutorture_oom_nb = { 2895 .notifier_call = rcutorture_oom_notify 2896 }; 2897 2898 /* Carry out grace-period forward-progress testing. 
*/ 2899 static int rcu_torture_fwd_prog(void *args) 2900 { 2901 bool firsttime = true; 2902 long max_cbs; 2903 int oldnice = task_nice(current); 2904 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2905 struct rcu_fwd *rfp = args; 2906 int tested = 0; 2907 int tested_tries = 0; 2908 2909 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2910 rcu_bind_current_to_nocb(); 2911 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2912 set_user_nice(current, MAX_NICE); 2913 do { 2914 if (!rfp->rcu_fwd_id) { 2915 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2916 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2917 if (!firsttime) { 2918 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2919 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2920 } 2921 firsttime = false; 2922 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2923 } else { 2924 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2925 schedule_timeout_interruptible(HZ / 20); 2926 oldseq = READ_ONCE(rcu_fwd_seq); 2927 } 2928 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2929 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2930 rcu_torture_fwd_prog_cr(rfp); 2931 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2932 (!IS_ENABLED(CONFIG_TINY_RCU) || 2933 (rcu_inkernel_boot_has_ended() && 2934 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2935 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2936 2937 /* Avoid slow periods, better to test when busy. */ 2938 if (stutter_wait("rcu_torture_fwd_prog")) 2939 sched_set_normal(current, oldnice); 2940 } while (!torture_must_stop()); 2941 /* Short runs might not contain a valid forward-progress attempt. */ 2942 if (!rfp->rcu_fwd_id) { 2943 WARN_ON(!tested && tested_tries >= 5); 2944 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2945 } 2946 torture_kthread_stopping("rcu_torture_fwd_prog"); 2947 return 0; 2948 } 2949 2950 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2951 static int __init rcu_torture_fwd_prog_init(void) 2952 { 2953 int i; 2954 int ret = 0; 2955 struct rcu_fwd *rfp; 2956 2957 if (!fwd_progress) 2958 return 0; /* Not requested, so don't do it. */ 2959 if (fwd_progress >= nr_cpu_ids) { 2960 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 2961 fwd_progress = nr_cpu_ids; 2962 } else if (fwd_progress < 0) { 2963 fwd_progress = nr_cpu_ids; 2964 } 2965 if ((!cur_ops->sync && !cur_ops->call) || 2966 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2967 cur_ops == &rcu_busted_ops) { 2968 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2969 fwd_progress = 0; 2970 return 0; 2971 } 2972 if (stall_cpu > 0) { 2973 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2974 fwd_progress = 0; 2975 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2976 return -EINVAL; /* In module, can fail back to user. */ 2977 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 2978 return 0; 2979 } 2980 if (fwd_progress_holdoff <= 0) 2981 fwd_progress_holdoff = 1; 2982 if (fwd_progress_div <= 0) 2983 fwd_progress_div = 4; 2984 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 2985 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 2986 if (!rfp || !fwd_prog_tasks) { 2987 kfree(rfp); 2988 kfree(fwd_prog_tasks); 2989 fwd_prog_tasks = NULL; 2990 fwd_progress = 0; 2991 return -ENOMEM; 2992 } 2993 for (i = 0; i < fwd_progress; i++) { 2994 spin_lock_init(&rfp[i].rcu_fwd_lock); 2995 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 2996 rfp[i].rcu_fwd_id = i; 2997 } 2998 mutex_lock(&rcu_fwd_mutex); 2999 rcu_fwds = rfp; 3000 mutex_unlock(&rcu_fwd_mutex); 3001 register_oom_notifier(&rcutorture_oom_nb); 3002 for (i = 0; i < fwd_progress; i++) { 3003 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3004 if (ret) { 3005 fwd_progress = i; 3006 return ret; 3007 } 3008 } 3009 return 0; 3010 } 3011 3012 static void rcu_torture_fwd_prog_cleanup(void) 3013 { 3014 int i; 3015 struct rcu_fwd *rfp; 3016 3017 if (!rcu_fwds || !fwd_prog_tasks) 3018 return; 3019 for (i = 0; i < fwd_progress; i++) 3020 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3021 unregister_oom_notifier(&rcutorture_oom_nb); 3022 mutex_lock(&rcu_fwd_mutex); 3023 rfp = rcu_fwds; 3024 rcu_fwds = NULL; 3025 mutex_unlock(&rcu_fwd_mutex); 3026 kfree(rfp); 3027 kfree(fwd_prog_tasks); 3028 fwd_prog_tasks = NULL; 3029 } 3030 3031 /* Callback function for RCU barrier testing. */ 3032 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3033 { 3034 atomic_inc(&barrier_cbs_invoked); 3035 } 3036 3037 /* IPI handler to get callback posted on desired CPU, if online. */ 3038 static void rcu_torture_barrier1cb(void *rcu_void) 3039 { 3040 struct rcu_head *rhp = rcu_void; 3041 3042 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3043 } 3044 3045 /* kthread function to register callbacks used to test RCU barriers. */ 3046 static int rcu_torture_barrier_cbs(void *arg) 3047 { 3048 long myid = (long)arg; 3049 bool lastphase = false; 3050 bool newphase; 3051 struct rcu_head rcu; 3052 3053 init_rcu_head_on_stack(&rcu); 3054 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3055 set_user_nice(current, MAX_NICE); 3056 do { 3057 wait_event(barrier_cbs_wq[myid], 3058 (newphase = 3059 smp_load_acquire(&barrier_phase)) != lastphase || 3060 torture_must_stop()); 3061 lastphase = newphase; 3062 if (torture_must_stop()) 3063 break; 3064 /* 3065 * The above smp_load_acquire() ensures barrier_phase load 3066 * is ordered before the following ->call(). 3067 */ 3068 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 3069 &rcu, 1)) { 3070 // IPI failed, so use direct call from current CPU. 3071 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3072 } 3073 if (atomic_dec_and_test(&barrier_cbs_count)) 3074 wake_up(&barrier_wq); 3075 } while (!torture_must_stop()); 3076 if (cur_ops->cb_barrier != NULL) 3077 cur_ops->cb_barrier(); 3078 destroy_rcu_head_on_stack(&rcu); 3079 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3080 return 0; 3081 } 3082 3083 /* kthread function to drive and coordinate RCU barrier testing. */ 3084 static int rcu_torture_barrier(void *arg) 3085 { 3086 int i; 3087 3088 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3089 do { 3090 atomic_set(&barrier_cbs_invoked, 0); 3091 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3092 /* Ensure barrier_phase ordered after prior assignments. 
*/ 3093 smp_store_release(&barrier_phase, !barrier_phase); 3094 for (i = 0; i < n_barrier_cbs; i++) 3095 wake_up(&barrier_cbs_wq[i]); 3096 wait_event(barrier_wq, 3097 atomic_read(&barrier_cbs_count) == 0 || 3098 torture_must_stop()); 3099 if (torture_must_stop()) 3100 break; 3101 n_barrier_attempts++; 3102 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3103 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3104 n_rcu_torture_barrier_error++; 3105 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3106 atomic_read(&barrier_cbs_invoked), 3107 n_barrier_cbs); 3108 WARN_ON(1); 3109 // Wait manually for the remaining callbacks 3110 i = 0; 3111 do { 3112 if (WARN_ON(i++ > HZ)) 3113 i = INT_MIN; 3114 schedule_timeout_interruptible(1); 3115 cur_ops->cb_barrier(); 3116 } while (atomic_read(&barrier_cbs_invoked) != 3117 n_barrier_cbs && 3118 !torture_must_stop()); 3119 smp_mb(); // Can't trust ordering if broken. 3120 if (!torture_must_stop()) 3121 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3122 atomic_read(&barrier_cbs_invoked)); 3123 } else { 3124 n_barrier_successes++; 3125 } 3126 schedule_timeout_interruptible(HZ / 10); 3127 } while (!torture_must_stop()); 3128 torture_kthread_stopping("rcu_torture_barrier"); 3129 return 0; 3130 } 3131 3132 /* Initialize RCU barrier testing. */ 3133 static int rcu_torture_barrier_init(void) 3134 { 3135 int i; 3136 int ret; 3137 3138 if (n_barrier_cbs <= 0) 3139 return 0; 3140 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3141 pr_alert("%s" TORTURE_FLAG 3142 " Call or barrier ops missing for %s,\n", 3143 torture_type, cur_ops->name); 3144 pr_alert("%s" TORTURE_FLAG 3145 " RCU barrier testing omitted from run.\n", 3146 torture_type); 3147 return 0; 3148 } 3149 atomic_set(&barrier_cbs_count, 0); 3150 atomic_set(&barrier_cbs_invoked, 0); 3151 barrier_cbs_tasks = 3152 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3153 GFP_KERNEL); 3154 barrier_cbs_wq = 3155 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3156 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3157 return -ENOMEM; 3158 for (i = 0; i < n_barrier_cbs; i++) { 3159 init_waitqueue_head(&barrier_cbs_wq[i]); 3160 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3161 (void *)(long)i, 3162 barrier_cbs_tasks[i]); 3163 if (ret) 3164 return ret; 3165 } 3166 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3167 } 3168 3169 /* Clean up after RCU barrier testing. */ 3170 static void rcu_torture_barrier_cleanup(void) 3171 { 3172 int i; 3173 3174 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3175 if (barrier_cbs_tasks != NULL) { 3176 for (i = 0; i < n_barrier_cbs; i++) 3177 torture_stop_kthread(rcu_torture_barrier_cbs, 3178 barrier_cbs_tasks[i]); 3179 kfree(barrier_cbs_tasks); 3180 barrier_cbs_tasks = NULL; 3181 } 3182 if (barrier_cbs_wq != NULL) { 3183 kfree(barrier_cbs_wq); 3184 barrier_cbs_wq = NULL; 3185 } 3186 } 3187 3188 static bool rcu_torture_can_boost(void) 3189 { 3190 static int boost_warn_once; 3191 int prio; 3192 3193 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3194 return false; 3195 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3196 return false; 3197 3198 prio = rcu_get_gp_kthreads_prio(); 3199 if (!prio) 3200 return false; 3201 3202 if (prio < 2) { 3203 if (boost_warn_once == 1) 3204 return false; 3205 3206 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3207 boost_warn_once = 1; 3208 return false; 3209 } 3210 3211 return true; 3212 } 3213 3214 static bool read_exit_child_stop; 3215 static bool read_exit_child_stopped; 3216 static wait_queue_head_t read_exit_wq; 3217 3218 // Child kthread which just does an rcutorture reader and exits. 3219 static int rcu_torture_read_exit_child(void *trsp_in) 3220 { 3221 struct torture_random_state *trsp = trsp_in; 3222 3223 set_user_nice(current, MAX_NICE); 3224 // Minimize time between reading and exiting. 3225 while (!kthread_should_stop()) 3226 schedule_timeout_uninterruptible(HZ / 20); 3227 (void)rcu_torture_one_read(trsp, -1); 3228 return 0; 3229 } 3230 3231 // Parent kthread which creates and destroys read-exit child kthreads. 3232 static int rcu_torture_read_exit(void *unused) 3233 { 3234 bool errexit = false; 3235 int i; 3236 struct task_struct *tsp; 3237 DEFINE_TORTURE_RANDOM(trs); 3238 3239 // Allocate and initialize. 3240 set_user_nice(current, MAX_NICE); 3241 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3242 3243 // Each pass through this loop does one read-exit episode. 3244 do { 3245 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3246 for (i = 0; i < read_exit_burst; i++) { 3247 if (READ_ONCE(read_exit_child_stop)) 3248 break; 3249 stutter_wait("rcu_torture_read_exit"); 3250 // Spawn child. 3251 tsp = kthread_run(rcu_torture_read_exit_child, 3252 &trs, "%s", "rcu_torture_read_exit_child"); 3253 if (IS_ERR(tsp)) { 3254 TOROUT_ERRSTRING("out of memory"); 3255 errexit = true; 3256 break; 3257 } 3258 cond_resched(); 3259 kthread_stop(tsp); 3260 n_read_exits++; 3261 } 3262 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3263 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3264 i = 0; 3265 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3266 schedule_timeout_uninterruptible(HZ); 3267 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3268 3269 // Clean up and exit. 3270 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3271 smp_mb(); // Store before wakeup. 3272 wake_up(&read_exit_wq); 3273 while (!torture_must_stop()) 3274 schedule_timeout_uninterruptible(HZ / 20); 3275 torture_kthread_stopping("rcu_torture_read_exit"); 3276 return 0; 3277 } 3278 3279 static int rcu_torture_read_exit_init(void) 3280 { 3281 if (read_exit_burst <= 0) 3282 return 0; 3283 init_waitqueue_head(&read_exit_wq); 3284 read_exit_child_stop = false; 3285 read_exit_child_stopped = false; 3286 return torture_create_kthread(rcu_torture_read_exit, NULL, 3287 read_exit_task); 3288 } 3289 3290 static void rcu_torture_read_exit_cleanup(void) 3291 { 3292 if (!read_exit_task) 3293 return; 3294 WRITE_ONCE(read_exit_child_stop, true); 3295 smp_mb(); // Above write before wait. 
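	/*
	 * The smp_load_acquire() below pairs with the smp_store_release()
	 * of read_exit_child_stopped in rcu_torture_read_exit(), ensuring
	 * that the parent kthread has finished reaping its children before
	 * this cleanup function returns.
	 */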
3296 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 3297 torture_stop_kthread(rcutorture_read_exit, read_exit_task); 3298 } 3299 3300 static void rcutorture_test_nmis(int n) 3301 { 3302 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3303 int cpu; 3304 int dumpcpu; 3305 int i; 3306 3307 for (i = 0; i < n; i++) { 3308 preempt_disable(); 3309 cpu = smp_processor_id(); 3310 dumpcpu = cpu + 1; 3311 if (dumpcpu >= nr_cpu_ids) 3312 dumpcpu = 0; 3313 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu); 3314 dump_cpu_task(dumpcpu); 3315 preempt_enable(); 3316 schedule_timeout_uninterruptible(15 * HZ); 3317 } 3318 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3319 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis); 3320 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3321 } 3322 3323 static enum cpuhp_state rcutor_hp; 3324 3325 static void 3326 rcu_torture_cleanup(void) 3327 { 3328 int firsttime; 3329 int flags = 0; 3330 unsigned long gp_seq = 0; 3331 int i; 3332 3333 if (torture_cleanup_begin()) { 3334 if (cur_ops->cb_barrier != NULL) { 3335 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3336 cur_ops->cb_barrier(); 3337 } 3338 rcu_gp_slow_unregister(NULL); 3339 return; 3340 } 3341 if (!cur_ops) { 3342 torture_cleanup_end(); 3343 rcu_gp_slow_unregister(NULL); 3344 return; 3345 } 3346 3347 rcutorture_test_nmis(test_nmis); 3348 3349 if (cur_ops->gp_kthread_dbg) 3350 cur_ops->gp_kthread_dbg(); 3351 rcu_torture_read_exit_cleanup(); 3352 rcu_torture_barrier_cleanup(); 3353 rcu_torture_fwd_prog_cleanup(); 3354 torture_stop_kthread(rcu_torture_stall, stall_task); 3355 torture_stop_kthread(rcu_torture_writer, writer_task); 3356 3357 if (nocb_tasks) { 3358 for (i = 0; i < nrealnocbers; i++) 3359 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 3360 kfree(nocb_tasks); 3361 nocb_tasks = NULL; 3362 } 3363 3364 if (reader_tasks) { 3365 for (i = 0; i < nrealreaders; i++) 3366 torture_stop_kthread(rcu_torture_reader, 3367 reader_tasks[i]); 3368 kfree(reader_tasks); 3369 reader_tasks = NULL; 3370 } 3371 kfree(rcu_torture_reader_mbchk); 3372 rcu_torture_reader_mbchk = NULL; 3373 3374 if (fakewriter_tasks) { 3375 for (i = 0; i < nfakewriters; i++) 3376 torture_stop_kthread(rcu_torture_fakewriter, 3377 fakewriter_tasks[i]); 3378 kfree(fakewriter_tasks); 3379 fakewriter_tasks = NULL; 3380 } 3381 3382 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 3383 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 3384 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 3385 cur_ops->name, (long)gp_seq, flags, 3386 rcutorture_seq_diff(gp_seq, start_gp_seq)); 3387 torture_stop_kthread(rcu_torture_stats, stats_task); 3388 torture_stop_kthread(rcu_torture_fqs, fqs_task); 3389 if (rcu_torture_can_boost() && rcutor_hp >= 0) 3390 cpuhp_remove_state(rcutor_hp); 3391 3392 /* 3393 * Wait for all RCU callbacks to fire, then do torture-type-specific 3394 * cleanup operations. 3395 */ 3396 if (cur_ops->cb_barrier != NULL) { 3397 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3398 cur_ops->cb_barrier(); 3399 } 3400 if (cur_ops->cleanup != NULL) 3401 cur_ops->cleanup(); 3402 3403 rcu_torture_mem_dump_obj(); 3404 3405 rcu_torture_stats_print(); /* -After- the stats thread is stopped! 
	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");

		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu_hurry(rhp, rcu_torture_leak_cb);
		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}

static DEFINE_MUTEX(mut0);
static DEFINE_MUTEX(mut1);
static DEFINE_MUTEX(mut2);
static DEFINE_MUTEX(mut3);
static DEFINE_MUTEX(mut4);
static DEFINE_MUTEX(mut5);
static DEFINE_MUTEX(mut6);
static DEFINE_MUTEX(mut7);
static DEFINE_MUTEX(mut8);
static DEFINE_MUTEX(mut9);

static DECLARE_RWSEM(rwsem0);
static DECLARE_RWSEM(rwsem1);
static DECLARE_RWSEM(rwsem2);
static DECLARE_RWSEM(rwsem3);
static DECLARE_RWSEM(rwsem4);
static DECLARE_RWSEM(rwsem5);
static DECLARE_RWSEM(rwsem6);
static DECLARE_RWSEM(rwsem7);
static DECLARE_RWSEM(rwsem8);
static DECLARE_RWSEM(rwsem9);

DEFINE_STATIC_SRCU(srcu0);
DEFINE_STATIC_SRCU(srcu1);
DEFINE_STATIC_SRCU(srcu2);
DEFINE_STATIC_SRCU(srcu3);
DEFINE_STATIC_SRCU(srcu4);
DEFINE_STATIC_SRCU(srcu5);
DEFINE_STATIC_SRCU(srcu6);
DEFINE_STATIC_SRCU(srcu7);
DEFINE_STATIC_SRCU(srcu8);
DEFINE_STATIC_SRCU(srcu9);

static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
			     int cyclelen, int deadlock)
{
	int j = i + 1;

	if (j >= cyclelen)
		j = deadlock ? 0 : -1;
	if (j >= 0)
		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
	else
		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
	return j;
}

// Test lockdep on SRCU-based deadlock scenarios.
static void rcu_torture_init_srcu_lockdep(void)
{
	int cyclelen;
	int deadlock;
	bool err = false;
	int i;
	int j;
	int idx;
	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
				 &mut5, &mut6, &mut7, &mut8, &mut9 };
	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
	int testtype;

	if (!test_srcu_lockdep)
		return;

	deadlock = test_srcu_lockdep / 1000;
	testtype = (test_srcu_lockdep / 10) % 100;
	cyclelen = test_srcu_lockdep % 10;
	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
	if (WARN_ONCE(deadlock != !!deadlock,
		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
		      __func__, test_srcu_lockdep, deadlock))
		err = true;
	if (WARN_ONCE(cyclelen <= 0,
		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
		      __func__, test_srcu_lockdep, cyclelen))
		err = true;
	if (err)
		goto err_out;

	if (testtype == 0) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
					      "srcu_read_unlock", i, cyclelen, deadlock);
			idx = srcu_read_lock(srcus[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			srcu_read_unlock(srcus[i], idx);
		}
		return;
	}

	if (testtype == 1) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			mutex_lock(muts[i]);
			mutex_unlock(muts[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
					      "mutex_unlock", i, cyclelen, deadlock);
			mutex_lock(muts[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			mutex_unlock(muts[i]);
		}
		return;
	}

	if (testtype == 2) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			down_read(rwsems[i]);
			up_read(rwsems[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
					      "up_write", i, cyclelen, deadlock);
			down_write(rwsems[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			up_write(rwsems[i]);
		}
		return;
	}

#ifdef CONFIG_TASKS_TRACE_RCU
	if (testtype == 3) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
						     : "synchronize_srcu";
			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";

			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
			if (i == 0)
				rcu_read_lock_trace();
			else
				idx = srcu_read_lock(srcus[i]);
			if (j >= 0) {
				if (i == cyclelen - 1)
					synchronize_rcu_tasks_trace();
				else
					synchronize_srcu(srcus[j]);
			}
			if (i == 0)
				rcu_read_unlock_trace();
			else
				srcu_read_unlock(srcus[i], idx);
		}
		return;
	}
#endif // #ifdef CONFIG_TASKS_TRACE_RCU

err_out:
	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
	pr_info("%s: L: Cycle length.\n", __func__);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
}

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
		nocbs_nthreads = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	rcu_torture_init_srcu_lockdep();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
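		// When the torture type reports a CPU-stall duration, use it
		// for torture_stutter_init()'s second argument in place of
		// the stutter interval itself.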
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_holdoff < 0)
		fqs_holdoff = 0;
	if (fqs_duration && fqs_holdoff) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
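
/*
 * When rcutorture is built into the kernel, rcu_torture_init() runs
 * automatically during boot; when built as a module, the test starts at
 * module-load time and rcu_torture_cleanup() runs at module removal.
 */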