1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/rcu_notifier.h> 25 #include <linux/interrupt.h> 26 #include <linux/sched/signal.h> 27 #include <uapi/linux/sched/types.h> 28 #include <linux/atomic.h> 29 #include <linux/bitops.h> 30 #include <linux/completion.h> 31 #include <linux/moduleparam.h> 32 #include <linux/percpu.h> 33 #include <linux/notifier.h> 34 #include <linux/reboot.h> 35 #include <linux/freezer.h> 36 #include <linux/cpu.h> 37 #include <linux/delay.h> 38 #include <linux/stat.h> 39 #include <linux/srcu.h> 40 #include <linux/slab.h> 41 #include <linux/trace_clock.h> 42 #include <asm/byteorder.h> 43 #include <linux/torture.h> 44 #include <linux/vmalloc.h> 45 #include <linux/sched/debug.h> 46 #include <linux/sched/sysctl.h> 47 #include <linux/oom.h> 48 #include <linux/tick.h> 49 #include <linux/rcupdate_trace.h> 50 #include <linux/nmi.h> 51 52 #include "rcu.h" 53 54 MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility"); 55 MODULE_LICENSE("GPL"); 56 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 57 58 /* Bits for ->extendables field, extendables param, and related definitions. */ 59 #define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */ 60 #define RCUTORTURE_RDR_MASK_1 (0xff << RCUTORTURE_RDR_SHIFT_1) 61 #define RCUTORTURE_RDR_SHIFT_2 16 /* Put SRCU index in upper bits. */ 62 #define RCUTORTURE_RDR_MASK_2 (0xff << RCUTORTURE_RDR_SHIFT_2) 63 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 64 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 65 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 66 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 67 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 68 #define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */ 69 #define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */ 70 #define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */ 71 #define RCUTORTURE_MAX_EXTEND \ 72 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 73 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 74 #define RCUTORTURE_RDR_ALLBITS \ 75 (RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \ 76 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2) 77 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 78 /* Must be power of two minus one. 
*/
79 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
80
81 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
82 	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
83 torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
84 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
85 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
86 torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
87 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
88 torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
89 torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
90 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
91 torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
92 torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
93 torture_param(bool, gp_cond_exp_full, false,
94 	      "Use conditional/async full-state expedited GP wait primitives");
95 torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
96 	      "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
97 torture_param(int, gp_cond_wi_exp, 128,
98 	      "Wait interval for expedited conditional grace periods, us (default 128 us)");
99 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
100 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
101 torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
102 torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
103 torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
104 torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
105 torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
106 	      "Wait interval for normal polled grace periods, us (default 16 jiffies)");
107 torture_param(int, gp_poll_wi_exp, 128,
108 	      "Wait interval for expedited polled grace periods, us (default 128 us)");
109 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
110 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
111 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
112 torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
113 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
114 torture_param(int, nreaders, -1, "Number of RCU reader threads");
115 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
116 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
117 torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
118 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
119 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
120 torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
121 torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
122 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
123 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
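/*
 * These are ordinary module parameters, so a run that (for example)
 * exercises expedited and polled grace periods with seven readers could
 * be requested at module-load time with something like:
 *
 *	modprobe rcutorture nreaders=7 gp_exp=1 gp_poll=1
 *
 * or, for built-in rcutorture, on the kernel boot line using the
 * "rcutorture." prefix (for example, "rcutorture.torture_type=srcu").
 * The values shown here are illustrative only, not recommended settings.
 */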
124 torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit."); 125 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 126 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 127 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 128 torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s)."); 129 torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall."); 130 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 131 torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); 132 torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one."); 133 torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s)."); 134 torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s"); 135 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 136 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 137 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds."); 138 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds."); 139 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable."); 140 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs"); 141 torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario."); 142 torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); 143 144 static char *torture_type = "rcu"; 145 module_param(torture_type, charp, 0444); 146 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 147 148 static int nrealnocbers; 149 static int nrealreaders; 150 static struct task_struct *writer_task; 151 static struct task_struct **fakewriter_tasks; 152 static struct task_struct **reader_tasks; 153 static struct task_struct **nocb_tasks; 154 static struct task_struct *stats_task; 155 static struct task_struct *fqs_task; 156 static struct task_struct *boost_tasks[NR_CPUS]; 157 static struct task_struct *stall_task; 158 static struct task_struct **fwd_prog_tasks; 159 static struct task_struct **barrier_cbs_tasks; 160 static struct task_struct *barrier_task; 161 static struct task_struct *read_exit_task; 162 static struct task_struct *preempt_task; 163 164 #define RCU_TORTURE_PIPE_LEN 10 165 166 // Mailbox-like structure to check RCU global memory ordering. 167 struct rcu_torture_reader_check { 168 unsigned long rtc_myloops; 169 int rtc_chkrdr; 170 unsigned long rtc_chkloops; 171 int rtc_ready; 172 struct rcu_torture_reader_check *rtc_assigner; 173 } ____cacheline_internodealigned_in_smp; 174 175 // Update-side data structure used to check RCU readers. 
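// Each instance cycles from rcu_torture_freelist, through the
// rcu_torture_current pointer and the grace-period "pipeline"
// (rtort_pipe_count), and back to the freelist once readers can no
// longer be referencing it.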
176 struct rcu_torture { 177 struct rcu_head rtort_rcu; 178 int rtort_pipe_count; 179 struct list_head rtort_free; 180 int rtort_mbtest; 181 struct rcu_torture_reader_check *rtort_chkp; 182 }; 183 184 static LIST_HEAD(rcu_torture_freelist); 185 static struct rcu_torture __rcu *rcu_torture_current; 186 static unsigned long rcu_torture_current_version; 187 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 188 static DEFINE_SPINLOCK(rcu_torture_lock); 189 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 190 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 191 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 192 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 193 static atomic_t n_rcu_torture_alloc; 194 static atomic_t n_rcu_torture_alloc_fail; 195 static atomic_t n_rcu_torture_free; 196 static atomic_t n_rcu_torture_mberror; 197 static atomic_t n_rcu_torture_mbchk_fail; 198 static atomic_t n_rcu_torture_mbchk_tries; 199 static atomic_t n_rcu_torture_error; 200 static long n_rcu_torture_barrier_error; 201 static long n_rcu_torture_boost_ktrerror; 202 static long n_rcu_torture_boost_failure; 203 static long n_rcu_torture_boosts; 204 static atomic_long_t n_rcu_torture_timers; 205 static long n_barrier_attempts; 206 static long n_barrier_successes; /* did rcu_barrier test succeed? */ 207 static unsigned long n_read_exits; 208 static struct list_head rcu_torture_removed; 209 static unsigned long shutdown_jiffies; 210 static unsigned long start_gp_seq; 211 static atomic_long_t n_nocb_offload; 212 static atomic_long_t n_nocb_deoffload; 213 214 static int rcu_torture_writer_state; 215 #define RTWS_FIXED_DELAY 0 216 #define RTWS_DELAY 1 217 #define RTWS_REPLACE 2 218 #define RTWS_DEF_FREE 3 219 #define RTWS_EXP_SYNC 4 220 #define RTWS_COND_GET 5 221 #define RTWS_COND_GET_FULL 6 222 #define RTWS_COND_GET_EXP 7 223 #define RTWS_COND_GET_EXP_FULL 8 224 #define RTWS_COND_SYNC 9 225 #define RTWS_COND_SYNC_FULL 10 226 #define RTWS_COND_SYNC_EXP 11 227 #define RTWS_COND_SYNC_EXP_FULL 12 228 #define RTWS_POLL_GET 13 229 #define RTWS_POLL_GET_FULL 14 230 #define RTWS_POLL_GET_EXP 15 231 #define RTWS_POLL_GET_EXP_FULL 16 232 #define RTWS_POLL_WAIT 17 233 #define RTWS_POLL_WAIT_FULL 18 234 #define RTWS_POLL_WAIT_EXP 19 235 #define RTWS_POLL_WAIT_EXP_FULL 20 236 #define RTWS_SYNC 21 237 #define RTWS_STUTTER 22 238 #define RTWS_STOPPING 23 239 static const char * const rcu_torture_writer_state_names[] = { 240 "RTWS_FIXED_DELAY", 241 "RTWS_DELAY", 242 "RTWS_REPLACE", 243 "RTWS_DEF_FREE", 244 "RTWS_EXP_SYNC", 245 "RTWS_COND_GET", 246 "RTWS_COND_GET_FULL", 247 "RTWS_COND_GET_EXP", 248 "RTWS_COND_GET_EXP_FULL", 249 "RTWS_COND_SYNC", 250 "RTWS_COND_SYNC_FULL", 251 "RTWS_COND_SYNC_EXP", 252 "RTWS_COND_SYNC_EXP_FULL", 253 "RTWS_POLL_GET", 254 "RTWS_POLL_GET_FULL", 255 "RTWS_POLL_GET_EXP", 256 "RTWS_POLL_GET_EXP_FULL", 257 "RTWS_POLL_WAIT", 258 "RTWS_POLL_WAIT_FULL", 259 "RTWS_POLL_WAIT_EXP", 260 "RTWS_POLL_WAIT_EXP_FULL", 261 "RTWS_SYNC", 262 "RTWS_STUTTER", 263 "RTWS_STOPPING", 264 }; 265 266 /* Record reader segment types and duration for first failing read. 
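 * Up to RCUTORTURE_RDR_MAX_SEGS segments are recorded for a given read,
 * one rt_read_seg for each protection state that the reader passed
 * through.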
*/ 267 struct rt_read_seg { 268 int rt_readstate; 269 unsigned long rt_delay_jiffies; 270 unsigned long rt_delay_ms; 271 unsigned long rt_delay_us; 272 bool rt_preempted; 273 int rt_cpu; 274 int rt_end_cpu; 275 }; 276 static int err_segs_recorded; 277 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 278 static int rt_read_nsegs; 279 static int rt_read_preempted; 280 281 static const char *rcu_torture_writer_state_getname(void) 282 { 283 unsigned int i = READ_ONCE(rcu_torture_writer_state); 284 285 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 286 return "???"; 287 return rcu_torture_writer_state_names[i]; 288 } 289 290 #ifdef CONFIG_RCU_TRACE 291 static u64 notrace rcu_trace_clock_local(void) 292 { 293 u64 ts = trace_clock_local(); 294 295 (void)do_div(ts, NSEC_PER_USEC); 296 return ts; 297 } 298 #else /* #ifdef CONFIG_RCU_TRACE */ 299 static u64 notrace rcu_trace_clock_local(void) 300 { 301 return 0ULL; 302 } 303 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 304 305 /* 306 * Stop aggressive CPU-hog tests a bit before the end of the test in order 307 * to avoid interfering with test shutdown. 308 */ 309 static bool shutdown_time_arrived(void) 310 { 311 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 312 } 313 314 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 315 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 316 /* and boost task create/destroy. */ 317 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 318 static bool barrier_phase; /* Test phase. */ 319 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 320 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 321 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 322 323 static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 324 325 /* 326 * Allocate an element from the rcu_tortures pool. 327 */ 328 static struct rcu_torture * 329 rcu_torture_alloc(void) 330 { 331 struct list_head *p; 332 333 spin_lock_bh(&rcu_torture_lock); 334 if (list_empty(&rcu_torture_freelist)) { 335 atomic_inc(&n_rcu_torture_alloc_fail); 336 spin_unlock_bh(&rcu_torture_lock); 337 return NULL; 338 } 339 atomic_inc(&n_rcu_torture_alloc); 340 p = rcu_torture_freelist.next; 341 list_del_init(p); 342 spin_unlock_bh(&rcu_torture_lock); 343 return container_of(p, struct rcu_torture, rtort_free); 344 } 345 346 /* 347 * Free an element to the rcu_tortures pool. 348 */ 349 static void 350 rcu_torture_free(struct rcu_torture *p) 351 { 352 atomic_inc(&n_rcu_torture_free); 353 spin_lock_bh(&rcu_torture_lock); 354 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 355 spin_unlock_bh(&rcu_torture_lock); 356 } 357 358 /* 359 * Operations vector for selecting different types of tests. 360 */ 361 362 struct rcu_torture_ops { 363 int ttype; 364 void (*init)(void); 365 void (*cleanup)(void); 366 int (*readlock)(void); 367 void (*read_delay)(struct torture_random_state *rrsp, 368 struct rt_read_seg *rtrsp); 369 void (*readunlock)(int idx); 370 int (*readlock_held)(void); // lockdep. 371 int (*readlock_nesting)(void); // actual nesting, if available, -1 if not. 
372 unsigned long (*get_gp_seq)(void); 373 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 374 void (*deferred_free)(struct rcu_torture *p); 375 void (*sync)(void); 376 void (*exp_sync)(void); 377 unsigned long (*get_gp_state_exp)(void); 378 unsigned long (*start_gp_poll_exp)(void); 379 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp); 380 bool (*poll_gp_state_exp)(unsigned long oldstate); 381 void (*cond_sync_exp)(unsigned long oldstate); 382 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp); 383 unsigned long (*get_comp_state)(void); 384 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp); 385 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2); 386 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2); 387 unsigned long (*get_gp_state)(void); 388 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp); 389 unsigned long (*start_gp_poll)(void); 390 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp); 391 bool (*poll_gp_state)(unsigned long oldstate); 392 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp); 393 bool (*poll_need_2gp)(bool poll, bool poll_full); 394 void (*cond_sync)(unsigned long oldstate); 395 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp); 396 int poll_active; 397 int poll_active_full; 398 call_rcu_func_t call; 399 void (*cb_barrier)(void); 400 void (*fqs)(void); 401 void (*stats)(void); 402 void (*gp_kthread_dbg)(void); 403 bool (*check_boost_failed)(unsigned long gp_state, int *cpup); 404 int (*stall_dur)(void); 405 void (*get_gp_data)(int *flags, unsigned long *gp_seq); 406 void (*gp_slow_register)(atomic_t *rgssp); 407 void (*gp_slow_unregister)(atomic_t *rgssp); 408 bool (*reader_blocked)(void); 409 long cbflood_max; 410 int irq_capable; 411 int can_boost; 412 int extendables; 413 int slow_gps; 414 int no_pi_lock; 415 int debug_objects; 416 int start_poll_irqsoff; 417 const char *name; 418 }; 419 420 static struct rcu_torture_ops *cur_ops; 421 422 /* 423 * Definitions for rcu torture testing. 424 */ 425 426 static int torture_readlock_not_held(void) 427 { 428 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); 429 } 430 431 static int rcu_torture_read_lock(void) 432 { 433 rcu_read_lock(); 434 return 0; 435 } 436 437 static void 438 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 439 { 440 unsigned long started; 441 unsigned long completed; 442 const unsigned long shortdelay_us = 200; 443 unsigned long longdelay_ms = 300; 444 unsigned long long ts; 445 446 /* We want a short delay sometimes to make a reader delay the grace 447 * period, and we want a long delay occasionally to trigger 448 * force_quiescent_state. */ 449 450 if (!atomic_read(&rcu_fwd_cb_nodelay) && 451 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 452 started = cur_ops->get_gp_seq(); 453 ts = rcu_trace_clock_local(); 454 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 455 longdelay_ms = 5; /* Avoid triggering BH limits. */ 456 mdelay(longdelay_ms); 457 rtrsp->rt_delay_ms = longdelay_ms; 458 completed = cur_ops->get_gp_seq(); 459 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 460 started, completed); 461 } 462 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 463 udelay(shortdelay_us); 464 rtrsp->rt_delay_us = shortdelay_us; 465 } 466 if (!preempt_count() && 467 !(torture_random(rrsp) % (nrealreaders * 500))) 468 torture_preempt_schedule(); /* QS only if preemptible. 
*/ 469 } 470 471 static void rcu_torture_read_unlock(int idx) 472 { 473 rcu_read_unlock(); 474 } 475 476 static int rcu_torture_readlock_nesting(void) 477 { 478 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) 479 return rcu_preempt_depth(); 480 if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) 481 return (preempt_count() & PREEMPT_MASK); 482 return -1; 483 } 484 485 /* 486 * Update callback in the pipe. This should be invoked after a grace period. 487 */ 488 static bool 489 rcu_torture_pipe_update_one(struct rcu_torture *rp) 490 { 491 int i; 492 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); 493 494 if (rtrcp) { 495 WRITE_ONCE(rp->rtort_chkp, NULL); 496 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). 497 } 498 i = rp->rtort_pipe_count; 499 if (i > RCU_TORTURE_PIPE_LEN) 500 i = RCU_TORTURE_PIPE_LEN; 501 atomic_inc(&rcu_torture_wcount[i]); 502 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 503 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); 504 if (i + 1 >= RCU_TORTURE_PIPE_LEN) { 505 rp->rtort_mbtest = 0; 506 return true; 507 } 508 return false; 509 } 510 511 /* 512 * Update all callbacks in the pipe. Suitable for synchronous grace-period 513 * primitives. 514 */ 515 static void 516 rcu_torture_pipe_update(struct rcu_torture *old_rp) 517 { 518 struct rcu_torture *rp; 519 struct rcu_torture *rp1; 520 521 if (old_rp) 522 list_add(&old_rp->rtort_free, &rcu_torture_removed); 523 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 524 if (rcu_torture_pipe_update_one(rp)) { 525 list_del(&rp->rtort_free); 526 rcu_torture_free(rp); 527 } 528 } 529 } 530 531 static void 532 rcu_torture_cb(struct rcu_head *p) 533 { 534 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 535 536 if (torture_must_stop_irq()) { 537 /* Test is ending, just drop callbacks on the floor. */ 538 /* The next initialization will pick up the pieces. 
*/
539 		return;
540 	}
541 	if (rcu_torture_pipe_update_one(rp))
542 		rcu_torture_free(rp);
543 	else
544 		cur_ops->deferred_free(rp);
545 }
546
547 static unsigned long rcu_no_completed(void)
548 {
549 	return 0;
550 }
551
552 static void rcu_torture_deferred_free(struct rcu_torture *p)
553 {
554 	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
555 }
556
557 static void rcu_sync_torture_init(void)
558 {
559 	INIT_LIST_HEAD(&rcu_torture_removed);
560 }
561
562 static bool rcu_poll_need_2gp(bool poll, bool poll_full)
563 {
564 	return poll;
565 }
566
567 static struct rcu_torture_ops rcu_ops = {
568 	.ttype = RCU_FLAVOR,
569 	.init = rcu_sync_torture_init,
570 	.readlock = rcu_torture_read_lock,
571 	.read_delay = rcu_read_delay,
572 	.readunlock = rcu_torture_read_unlock,
573 	.readlock_held = torture_readlock_not_held,
574 	.readlock_nesting = rcu_torture_readlock_nesting,
575 	.get_gp_seq = rcu_get_gp_seq,
576 	.gp_diff = rcu_seq_diff,
577 	.deferred_free = rcu_torture_deferred_free,
578 	.sync = synchronize_rcu,
579 	.exp_sync = synchronize_rcu_expedited,
580 	.same_gp_state = same_state_synchronize_rcu,
581 	.same_gp_state_full = same_state_synchronize_rcu_full,
582 	.get_comp_state = get_completed_synchronize_rcu,
583 	.get_comp_state_full = get_completed_synchronize_rcu_full,
584 	.get_gp_state = get_state_synchronize_rcu,
585 	.get_gp_state_full = get_state_synchronize_rcu_full,
586 	.start_gp_poll = start_poll_synchronize_rcu,
587 	.start_gp_poll_full = start_poll_synchronize_rcu_full,
588 	.poll_gp_state = poll_state_synchronize_rcu,
589 	.poll_gp_state_full = poll_state_synchronize_rcu_full,
590 	.poll_need_2gp = rcu_poll_need_2gp,
591 	.cond_sync = cond_synchronize_rcu,
592 	.cond_sync_full = cond_synchronize_rcu_full,
593 	.poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
594 	.poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
595 	.get_gp_state_exp = get_state_synchronize_rcu,
596 	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
597 	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
598 	.poll_gp_state_exp = poll_state_synchronize_rcu,
599 	.cond_sync_exp = cond_synchronize_rcu_expedited,
600 	.cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
601 	.call = call_rcu_hurry,
602 	.cb_barrier = rcu_barrier,
603 	.fqs = rcu_force_quiescent_state,
604 	.gp_kthread_dbg = show_rcu_gp_kthreads,
605 	.check_boost_failed = rcu_check_boost_fail,
606 	.stall_dur = rcu_jiffies_till_stall_check,
607 	.get_gp_data = rcutorture_get_gp_data,
608 	.gp_slow_register = rcu_gp_slow_register,
609 	.gp_slow_unregister = rcu_gp_slow_unregister,
610 	.reader_blocked = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
611 			  ? has_rcu_reader_blocked
612 			  : NULL,
613 	.irq_capable = 1,
614 	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
615 	.extendables = RCUTORTURE_MAX_EXTEND,
616 	.debug_objects = 1,
617 	.start_poll_irqsoff = 1,
618 	.name = "rcu"
619 };
620
621 /*
622  * Don't even think about trying any of these in real life!!!
623  * The names include "busted", and they really mean it!
624  * The only purpose of these functions is to provide a buggy RCU
625  * implementation to make sure that rcutorture correctly emits
626  * buggy-RCU error messages.
627  */
628 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
629 {
630 	/* This is a deliberate bug for testing purposes only! */
631 	rcu_torture_cb(&p->rtort_rcu);
632 }
633
634 static void synchronize_rcu_busted(void)
635 {
636 	/* This is a deliberate bug for testing purposes only!
*/ 637 } 638 639 static void 640 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 641 { 642 /* This is a deliberate bug for testing purposes only! */ 643 func(head); 644 } 645 646 static struct rcu_torture_ops rcu_busted_ops = { 647 .ttype = INVALID_RCU_FLAVOR, 648 .init = rcu_sync_torture_init, 649 .readlock = rcu_torture_read_lock, 650 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 651 .readunlock = rcu_torture_read_unlock, 652 .readlock_held = torture_readlock_not_held, 653 .get_gp_seq = rcu_no_completed, 654 .deferred_free = rcu_busted_torture_deferred_free, 655 .sync = synchronize_rcu_busted, 656 .exp_sync = synchronize_rcu_busted, 657 .call = call_rcu_busted, 658 .irq_capable = 1, 659 .extendables = RCUTORTURE_MAX_EXTEND, 660 .name = "busted" 661 }; 662 663 /* 664 * Definitions for srcu torture testing. 665 */ 666 667 DEFINE_STATIC_SRCU(srcu_ctl); 668 static struct srcu_struct srcu_ctld; 669 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 670 static struct rcu_torture_ops srcud_ops; 671 672 static void srcu_get_gp_data(int *flags, unsigned long *gp_seq) 673 { 674 srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq); 675 } 676 677 static int srcu_torture_read_lock(void) 678 { 679 int idx; 680 int ret = 0; 681 682 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) { 683 idx = srcu_read_lock(srcu_ctlp); 684 WARN_ON_ONCE(idx & ~0x1); 685 ret += idx; 686 } 687 if (reader_flavor & SRCU_READ_FLAVOR_NMI) { 688 idx = srcu_read_lock_nmisafe(srcu_ctlp); 689 WARN_ON_ONCE(idx & ~0x1); 690 ret += idx << 1; 691 } 692 if (reader_flavor & SRCU_READ_FLAVOR_LITE) { 693 idx = srcu_read_lock_lite(srcu_ctlp); 694 WARN_ON_ONCE(idx & ~0x1); 695 ret += idx << 2; 696 } 697 return ret; 698 } 699 700 static void 701 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 702 { 703 long delay; 704 const long uspertick = 1000000 / HZ; 705 const long longdelay = 10; 706 707 /* We want there to be long-running readers, but not all the time. 
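 * Roughly one call in (nrealreaders * 2 * longdelay * uspertick) sleeps
 * for longdelay jiffies, provided it is running in task context; all
 * other calls fall back to rcu_read_delay().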
*/ 708 709 delay = torture_random(rrsp) % 710 (nrealreaders * 2 * longdelay * uspertick); 711 if (!delay && in_task()) { 712 schedule_timeout_interruptible(longdelay); 713 rtrsp->rt_delay_jiffies = longdelay; 714 } else { 715 rcu_read_delay(rrsp, rtrsp); 716 } 717 } 718 719 static void srcu_torture_read_unlock(int idx) 720 { 721 WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1))); 722 if (reader_flavor & SRCU_READ_FLAVOR_LITE) 723 srcu_read_unlock_lite(srcu_ctlp, (idx & 0x4) >> 2); 724 if (reader_flavor & SRCU_READ_FLAVOR_NMI) 725 srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1); 726 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) 727 srcu_read_unlock(srcu_ctlp, idx & 0x1); 728 } 729 730 static int torture_srcu_read_lock_held(void) 731 { 732 return srcu_read_lock_held(srcu_ctlp); 733 } 734 735 static unsigned long srcu_torture_completed(void) 736 { 737 return srcu_batches_completed(srcu_ctlp); 738 } 739 740 static void srcu_torture_deferred_free(struct rcu_torture *rp) 741 { 742 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 743 } 744 745 static void srcu_torture_synchronize(void) 746 { 747 synchronize_srcu(srcu_ctlp); 748 } 749 750 static unsigned long srcu_torture_get_gp_state(void) 751 { 752 return get_state_synchronize_srcu(srcu_ctlp); 753 } 754 755 static unsigned long srcu_torture_start_gp_poll(void) 756 { 757 return start_poll_synchronize_srcu(srcu_ctlp); 758 } 759 760 static bool srcu_torture_poll_gp_state(unsigned long oldstate) 761 { 762 return poll_state_synchronize_srcu(srcu_ctlp, oldstate); 763 } 764 765 static void srcu_torture_call(struct rcu_head *head, 766 rcu_callback_t func) 767 { 768 call_srcu(srcu_ctlp, head, func); 769 } 770 771 static void srcu_torture_barrier(void) 772 { 773 srcu_barrier(srcu_ctlp); 774 } 775 776 static void srcu_torture_stats(void) 777 { 778 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 779 } 780 781 static void srcu_torture_synchronize_expedited(void) 782 { 783 synchronize_srcu_expedited(srcu_ctlp); 784 } 785 786 static struct rcu_torture_ops srcu_ops = { 787 .ttype = SRCU_FLAVOR, 788 .init = rcu_sync_torture_init, 789 .readlock = srcu_torture_read_lock, 790 .read_delay = srcu_read_delay, 791 .readunlock = srcu_torture_read_unlock, 792 .readlock_held = torture_srcu_read_lock_held, 793 .get_gp_seq = srcu_torture_completed, 794 .deferred_free = srcu_torture_deferred_free, 795 .sync = srcu_torture_synchronize, 796 .exp_sync = srcu_torture_synchronize_expedited, 797 .same_gp_state = same_state_synchronize_srcu, 798 .get_comp_state = get_completed_synchronize_srcu, 799 .get_gp_state = srcu_torture_get_gp_state, 800 .start_gp_poll = srcu_torture_start_gp_poll, 801 .poll_gp_state = srcu_torture_poll_gp_state, 802 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE, 803 .call = srcu_torture_call, 804 .cb_barrier = srcu_torture_barrier, 805 .stats = srcu_torture_stats, 806 .get_gp_data = srcu_get_gp_data, 807 .cbflood_max = 50000, 808 .irq_capable = 1, 809 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 810 .debug_objects = 1, 811 .name = "srcu" 812 }; 813 814 static void srcu_torture_init(void) 815 { 816 rcu_sync_torture_init(); 817 WARN_ON(init_srcu_struct(&srcu_ctld)); 818 srcu_ctlp = &srcu_ctld; 819 } 820 821 static void srcu_torture_cleanup(void) 822 { 823 cleanup_srcu_struct(&srcu_ctld); 824 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 825 } 826 827 /* As above, but dynamically allocated. 
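 * The srcu_torture_init()/srcu_torture_cleanup() pair above switches
 * srcu_ctlp from the static srcu_ctl to the dynamically initialized
 * srcu_ctld for the duration of the test.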
*/ 828 static struct rcu_torture_ops srcud_ops = { 829 .ttype = SRCU_FLAVOR, 830 .init = srcu_torture_init, 831 .cleanup = srcu_torture_cleanup, 832 .readlock = srcu_torture_read_lock, 833 .read_delay = srcu_read_delay, 834 .readunlock = srcu_torture_read_unlock, 835 .readlock_held = torture_srcu_read_lock_held, 836 .get_gp_seq = srcu_torture_completed, 837 .deferred_free = srcu_torture_deferred_free, 838 .sync = srcu_torture_synchronize, 839 .exp_sync = srcu_torture_synchronize_expedited, 840 .same_gp_state = same_state_synchronize_srcu, 841 .get_comp_state = get_completed_synchronize_srcu, 842 .get_gp_state = srcu_torture_get_gp_state, 843 .start_gp_poll = srcu_torture_start_gp_poll, 844 .poll_gp_state = srcu_torture_poll_gp_state, 845 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE, 846 .call = srcu_torture_call, 847 .cb_barrier = srcu_torture_barrier, 848 .stats = srcu_torture_stats, 849 .get_gp_data = srcu_get_gp_data, 850 .cbflood_max = 50000, 851 .irq_capable = 1, 852 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 853 .debug_objects = 1, 854 .name = "srcud" 855 }; 856 857 /* As above, but broken due to inappropriate reader extension. */ 858 static struct rcu_torture_ops busted_srcud_ops = { 859 .ttype = SRCU_FLAVOR, 860 .init = srcu_torture_init, 861 .cleanup = srcu_torture_cleanup, 862 .readlock = srcu_torture_read_lock, 863 .read_delay = rcu_read_delay, 864 .readunlock = srcu_torture_read_unlock, 865 .readlock_held = torture_srcu_read_lock_held, 866 .get_gp_seq = srcu_torture_completed, 867 .deferred_free = srcu_torture_deferred_free, 868 .sync = srcu_torture_synchronize, 869 .exp_sync = srcu_torture_synchronize_expedited, 870 .call = srcu_torture_call, 871 .cb_barrier = srcu_torture_barrier, 872 .stats = srcu_torture_stats, 873 .irq_capable = 1, 874 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 875 .extendables = RCUTORTURE_MAX_EXTEND, 876 .name = "busted_srcud" 877 }; 878 879 /* 880 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 881 * This implementation does not necessarily work well with CPU hotplug. 882 */ 883 884 static void synchronize_rcu_trivial(void) 885 { 886 int cpu; 887 888 for_each_online_cpu(cpu) { 889 torture_sched_setaffinity(current->pid, cpumask_of(cpu), true); 890 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 891 } 892 } 893 894 static int rcu_torture_read_lock_trivial(void) 895 { 896 preempt_disable(); 897 return 0; 898 } 899 900 static void rcu_torture_read_unlock_trivial(int idx) 901 { 902 preempt_enable(); 903 } 904 905 static struct rcu_torture_ops trivial_ops = { 906 .ttype = RCU_TRIVIAL_FLAVOR, 907 .init = rcu_sync_torture_init, 908 .readlock = rcu_torture_read_lock_trivial, 909 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 910 .readunlock = rcu_torture_read_unlock_trivial, 911 .readlock_held = torture_readlock_not_held, 912 .get_gp_seq = rcu_no_completed, 913 .sync = synchronize_rcu_trivial, 914 .exp_sync = synchronize_rcu_trivial, 915 .irq_capable = 1, 916 .name = "trivial" 917 }; 918 919 #ifdef CONFIG_TASKS_RCU 920 921 /* 922 * Definitions for RCU-tasks torture testing. 
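 * An RCU Tasks reader is any region of kernel code that does not do a
 * voluntary context switch, so the readlock/readunlock callbacks below
 * are intentionally empty.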
923 */ 924 925 static int tasks_torture_read_lock(void) 926 { 927 return 0; 928 } 929 930 static void tasks_torture_read_unlock(int idx) 931 { 932 } 933 934 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 935 { 936 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 937 } 938 939 static void synchronize_rcu_mult_test(void) 940 { 941 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry); 942 } 943 944 static struct rcu_torture_ops tasks_ops = { 945 .ttype = RCU_TASKS_FLAVOR, 946 .init = rcu_sync_torture_init, 947 .readlock = tasks_torture_read_lock, 948 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 949 .readunlock = tasks_torture_read_unlock, 950 .get_gp_seq = rcu_no_completed, 951 .deferred_free = rcu_tasks_torture_deferred_free, 952 .sync = synchronize_rcu_tasks, 953 .exp_sync = synchronize_rcu_mult_test, 954 .call = call_rcu_tasks, 955 .cb_barrier = rcu_barrier_tasks, 956 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 957 .get_gp_data = rcu_tasks_get_gp_data, 958 .irq_capable = 1, 959 .slow_gps = 1, 960 .name = "tasks" 961 }; 962 963 #define TASKS_OPS &tasks_ops, 964 965 #else // #ifdef CONFIG_TASKS_RCU 966 967 #define TASKS_OPS 968 969 #endif // #else #ifdef CONFIG_TASKS_RCU 970 971 972 #ifdef CONFIG_TASKS_RUDE_RCU 973 974 /* 975 * Definitions for rude RCU-tasks torture testing. 976 */ 977 978 static struct rcu_torture_ops tasks_rude_ops = { 979 .ttype = RCU_TASKS_RUDE_FLAVOR, 980 .init = rcu_sync_torture_init, 981 .readlock = rcu_torture_read_lock_trivial, 982 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 983 .readunlock = rcu_torture_read_unlock_trivial, 984 .get_gp_seq = rcu_no_completed, 985 .sync = synchronize_rcu_tasks_rude, 986 .exp_sync = synchronize_rcu_tasks_rude, 987 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 988 .get_gp_data = rcu_tasks_rude_get_gp_data, 989 .cbflood_max = 50000, 990 .irq_capable = 1, 991 .name = "tasks-rude" 992 }; 993 994 #define TASKS_RUDE_OPS &tasks_rude_ops, 995 996 #else // #ifdef CONFIG_TASKS_RUDE_RCU 997 998 #define TASKS_RUDE_OPS 999 1000 #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU 1001 1002 1003 #ifdef CONFIG_TASKS_TRACE_RCU 1004 1005 /* 1006 * Definitions for tracing RCU-tasks torture testing. 1007 */ 1008 1009 static int tasks_tracing_torture_read_lock(void) 1010 { 1011 rcu_read_lock_trace(); 1012 return 0; 1013 } 1014 1015 static void tasks_tracing_torture_read_unlock(int idx) 1016 { 1017 rcu_read_unlock_trace(); 1018 } 1019 1020 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 1021 { 1022 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 1023 } 1024 1025 static struct rcu_torture_ops tasks_tracing_ops = { 1026 .ttype = RCU_TASKS_TRACING_FLAVOR, 1027 .init = rcu_sync_torture_init, 1028 .readlock = tasks_tracing_torture_read_lock, 1029 .read_delay = srcu_read_delay, /* just reuse srcu's version. 
*/ 1030 .readunlock = tasks_tracing_torture_read_unlock, 1031 .readlock_held = rcu_read_lock_trace_held, 1032 .get_gp_seq = rcu_no_completed, 1033 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 1034 .sync = synchronize_rcu_tasks_trace, 1035 .exp_sync = synchronize_rcu_tasks_trace, 1036 .call = call_rcu_tasks_trace, 1037 .cb_barrier = rcu_barrier_tasks_trace, 1038 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, 1039 .get_gp_data = rcu_tasks_trace_get_gp_data, 1040 .cbflood_max = 50000, 1041 .irq_capable = 1, 1042 .slow_gps = 1, 1043 .name = "tasks-tracing" 1044 }; 1045 1046 #define TASKS_TRACING_OPS &tasks_tracing_ops, 1047 1048 #else // #ifdef CONFIG_TASKS_TRACE_RCU 1049 1050 #define TASKS_TRACING_OPS 1051 1052 #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU 1053 1054 1055 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 1056 { 1057 if (!cur_ops->gp_diff) 1058 return new - old; 1059 return cur_ops->gp_diff(new, old); 1060 } 1061 1062 /* 1063 * RCU torture priority-boost testing. Runs one real-time thread per 1064 * CPU for moderate bursts, repeatedly starting grace periods and waiting 1065 * for them to complete. If a given grace period takes too long, we assume 1066 * that priority inversion has occurred. 1067 */ 1068 1069 static int old_rt_runtime = -1; 1070 1071 static void rcu_torture_disable_rt_throttle(void) 1072 { 1073 /* 1074 * Disable RT throttling so that rcutorture's boost threads don't get 1075 * throttled. Only possible if rcutorture is built-in otherwise the 1076 * user should manually do this by setting the sched_rt_period_us and 1077 * sched_rt_runtime sysctls. 1078 */ 1079 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 1080 return; 1081 1082 old_rt_runtime = sysctl_sched_rt_runtime; 1083 sysctl_sched_rt_runtime = -1; 1084 } 1085 1086 static void rcu_torture_enable_rt_throttle(void) 1087 { 1088 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 1089 return; 1090 1091 sysctl_sched_rt_runtime = old_rt_runtime; 1092 old_rt_runtime = -1; 1093 } 1094 1095 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) 1096 { 1097 int cpu; 1098 static int dbg_done; 1099 unsigned long end = jiffies; 1100 bool gp_done; 1101 unsigned long j; 1102 static unsigned long last_persist; 1103 unsigned long lp; 1104 unsigned long mininterval = test_boost_duration * HZ - HZ / 2; 1105 1106 if (end - *start > mininterval) { 1107 // Recheck after checking time to avoid false positives. 1108 smp_mb(); // Time check before grace-period check. 1109 if (cur_ops->poll_gp_state(gp_state)) 1110 return false; // passed, though perhaps just barely 1111 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { 1112 // At most one persisted message per boost test. 
1113 j = jiffies; 1114 lp = READ_ONCE(last_persist); 1115 if (time_after(j, lp + mininterval) && 1116 cmpxchg(&last_persist, lp, j) == lp) { 1117 if (cpu < 0) 1118 pr_info("Boost inversion persisted: QS from all CPUs\n"); 1119 else 1120 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); 1121 } 1122 return false; // passed on a technicality 1123 } 1124 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 1125 n_rcu_torture_boost_failure++; 1126 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { 1127 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", 1128 current->rt_priority, gp_state, end - *start); 1129 cur_ops->gp_kthread_dbg(); 1130 // Recheck after print to flag grace period ending during splat. 1131 gp_done = cur_ops->poll_gp_state(gp_state); 1132 pr_info("Boost inversion: GP %lu %s.\n", gp_state, 1133 gp_done ? "ended already" : "still pending"); 1134 1135 } 1136 1137 return true; // failed 1138 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { 1139 *start = jiffies; 1140 } 1141 1142 return false; // passed 1143 } 1144 1145 static int rcu_torture_boost(void *arg) 1146 { 1147 unsigned long endtime; 1148 unsigned long gp_state; 1149 unsigned long gp_state_time; 1150 unsigned long oldstarttime; 1151 1152 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 1153 1154 /* Set real-time priority. */ 1155 sched_set_fifo_low(current); 1156 1157 /* Each pass through the following loop does one boost-test cycle. */ 1158 do { 1159 bool failed = false; // Test failed already in this test interval 1160 bool gp_initiated = false; 1161 1162 if (kthread_should_stop()) 1163 goto checkwait; 1164 1165 /* Wait for the next test interval. */ 1166 oldstarttime = READ_ONCE(boost_starttime); 1167 while (time_before(jiffies, oldstarttime)) { 1168 schedule_timeout_interruptible(oldstarttime - jiffies); 1169 if (stutter_wait("rcu_torture_boost")) 1170 sched_set_fifo_low(current); 1171 if (torture_must_stop()) 1172 goto checkwait; 1173 } 1174 1175 // Do one boost-test interval. 1176 endtime = oldstarttime + test_boost_duration * HZ; 1177 while (time_before(jiffies, endtime)) { 1178 // Has current GP gone too long? 1179 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1180 failed = rcu_torture_boost_failed(gp_state, &gp_state_time); 1181 // If we don't have a grace period in flight, start one. 1182 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { 1183 gp_state = cur_ops->start_gp_poll(); 1184 gp_initiated = true; 1185 gp_state_time = jiffies; 1186 } 1187 if (stutter_wait("rcu_torture_boost")) { 1188 sched_set_fifo_low(current); 1189 // If the grace period already ended, 1190 // we don't know when that happened, so 1191 // start over. 1192 if (cur_ops->poll_gp_state(gp_state)) 1193 gp_initiated = false; 1194 } 1195 if (torture_must_stop()) 1196 goto checkwait; 1197 } 1198 1199 // In case the grace period extended beyond the end of the loop. 1200 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1201 rcu_torture_boost_failed(gp_state, &gp_state_time); 1202 1203 /* 1204 * Set the start time of the next test interval. 1205 * Yes, this is vulnerable to long delays, but such 1206 * delays simply cause a false negative for the next 1207 * interval. Besides, we are running at RT priority, 1208 * so delays should be relatively rare. 
1209 */ 1210 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { 1211 if (mutex_trylock(&boost_mutex)) { 1212 if (oldstarttime == boost_starttime) { 1213 WRITE_ONCE(boost_starttime, 1214 jiffies + test_boost_interval * HZ); 1215 n_rcu_torture_boosts++; 1216 } 1217 mutex_unlock(&boost_mutex); 1218 break; 1219 } 1220 schedule_timeout_uninterruptible(HZ / 20); 1221 } 1222 1223 /* Go do the stutter. */ 1224 checkwait: if (stutter_wait("rcu_torture_boost")) 1225 sched_set_fifo_low(current); 1226 } while (!torture_must_stop()); 1227 1228 /* Clean up and exit. */ 1229 while (!kthread_should_stop()) { 1230 torture_shutdown_absorb("rcu_torture_boost"); 1231 schedule_timeout_uninterruptible(HZ / 20); 1232 } 1233 torture_kthread_stopping("rcu_torture_boost"); 1234 return 0; 1235 } 1236 1237 /* 1238 * RCU torture force-quiescent-state kthread. Repeatedly induces 1239 * bursts of calls to force_quiescent_state(), increasing the probability 1240 * of occurrence of some important types of race conditions. 1241 */ 1242 static int 1243 rcu_torture_fqs(void *arg) 1244 { 1245 unsigned long fqs_resume_time; 1246 int fqs_burst_remaining; 1247 int oldnice = task_nice(current); 1248 1249 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1250 do { 1251 fqs_resume_time = jiffies + fqs_stutter * HZ; 1252 while (time_before(jiffies, fqs_resume_time) && 1253 !kthread_should_stop()) { 1254 schedule_timeout_interruptible(HZ / 20); 1255 } 1256 fqs_burst_remaining = fqs_duration; 1257 while (fqs_burst_remaining > 0 && 1258 !kthread_should_stop()) { 1259 cur_ops->fqs(); 1260 udelay(fqs_holdoff); 1261 fqs_burst_remaining -= fqs_holdoff; 1262 } 1263 if (stutter_wait("rcu_torture_fqs")) 1264 sched_set_normal(current, oldnice); 1265 } while (!torture_must_stop()); 1266 torture_kthread_stopping("rcu_torture_fqs"); 1267 return 0; 1268 } 1269 1270 // Used by writers to randomly choose from the available grace-period primitives. 1271 static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; 1272 static int nsynctypes; 1273 1274 /* 1275 * Determine which grace-period primitives are available. 1276 */ 1277 static void rcu_torture_write_types(void) 1278 { 1279 bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full; 1280 bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp; 1281 bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll; 1282 bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync; 1283 1284 /* Initialize synctype[] array. If none set, take default. 
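 * The default is to exercise every grace-period wait flavor that the
 * current rcu_torture_ops structure actually provides.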
*/ 1285 if (!gp_cond1 && 1286 !gp_cond_exp1 && 1287 !gp_cond_full1 && 1288 !gp_cond_exp_full1 && 1289 !gp_exp1 && 1290 !gp_poll_exp1 && 1291 !gp_poll_exp_full1 && 1292 !gp_normal1 && 1293 !gp_poll1 && 1294 !gp_poll_full1 && 1295 !gp_sync1) { 1296 gp_cond1 = true; 1297 gp_cond_exp1 = true; 1298 gp_cond_full1 = true; 1299 gp_cond_exp_full1 = true; 1300 gp_exp1 = true; 1301 gp_poll_exp1 = true; 1302 gp_poll_exp_full1 = true; 1303 gp_normal1 = true; 1304 gp_poll1 = true; 1305 gp_poll_full1 = true; 1306 gp_sync1 = true; 1307 } 1308 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { 1309 synctype[nsynctypes++] = RTWS_COND_GET; 1310 pr_info("%s: Testing conditional GPs.\n", __func__); 1311 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { 1312 pr_alert("%s: gp_cond without primitives.\n", __func__); 1313 } 1314 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { 1315 synctype[nsynctypes++] = RTWS_COND_GET_EXP; 1316 pr_info("%s: Testing conditional expedited GPs.\n", __func__); 1317 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { 1318 pr_alert("%s: gp_cond_exp without primitives.\n", __func__); 1319 } 1320 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) { 1321 synctype[nsynctypes++] = RTWS_COND_GET_FULL; 1322 pr_info("%s: Testing conditional full-state GPs.\n", __func__); 1323 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) { 1324 pr_alert("%s: gp_cond_full without primitives.\n", __func__); 1325 } 1326 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) { 1327 synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL; 1328 pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__); 1329 } else if (gp_cond_exp_full && 1330 (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) { 1331 pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__); 1332 } 1333 if (gp_exp1 && cur_ops->exp_sync) { 1334 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1335 pr_info("%s: Testing expedited GPs.\n", __func__); 1336 } else if (gp_exp && !cur_ops->exp_sync) { 1337 pr_alert("%s: gp_exp without primitives.\n", __func__); 1338 } 1339 if (gp_normal1 && cur_ops->deferred_free) { 1340 synctype[nsynctypes++] = RTWS_DEF_FREE; 1341 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1342 } else if (gp_normal && !cur_ops->deferred_free) { 1343 pr_alert("%s: gp_normal without primitives.\n", __func__); 1344 } 1345 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state && 1346 cur_ops->start_gp_poll && cur_ops->poll_gp_state) { 1347 synctype[nsynctypes++] = RTWS_POLL_GET; 1348 pr_info("%s: Testing polling GPs.\n", __func__); 1349 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { 1350 pr_alert("%s: gp_poll without primitives.\n", __func__); 1351 } 1352 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full 1353 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) { 1354 synctype[nsynctypes++] = RTWS_POLL_GET_FULL; 1355 pr_info("%s: Testing polling full-state GPs.\n", __func__); 1356 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) { 1357 pr_alert("%s: gp_poll_full without primitives.\n", __func__); 1358 } 1359 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { 1360 synctype[nsynctypes++] = RTWS_POLL_GET_EXP; 1361 pr_info("%s: Testing polling expedited GPs.\n", __func__); 1362 } else if 
(gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { 1363 pr_alert("%s: gp_poll_exp without primitives.\n", __func__); 1364 } 1365 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) { 1366 synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL; 1367 pr_info("%s: Testing polling full-state expedited GPs.\n", __func__); 1368 } else if (gp_poll_exp_full && 1369 (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) { 1370 pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__); 1371 } 1372 if (gp_sync1 && cur_ops->sync) { 1373 synctype[nsynctypes++] = RTWS_SYNC; 1374 pr_info("%s: Testing normal GPs.\n", __func__); 1375 } else if (gp_sync && !cur_ops->sync) { 1376 pr_alert("%s: gp_sync without primitives.\n", __func__); 1377 } 1378 pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes); 1379 pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n", __func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp); 1380 } 1381 1382 /* 1383 * Do the specified rcu_torture_writer() synchronous grace period, 1384 * while also testing out the polled APIs. Note well that the single-CPU 1385 * grace-period optimizations must be accounted for. 1386 */ 1387 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) 1388 { 1389 unsigned long cookie; 1390 struct rcu_gp_oldstate cookie_full; 1391 bool dopoll; 1392 bool dopoll_full; 1393 unsigned long r = torture_random(trsp); 1394 1395 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); 1396 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); 1397 if (dopoll || dopoll_full) 1398 cpus_read_lock(); 1399 if (dopoll) 1400 cookie = cur_ops->get_gp_state(); 1401 if (dopoll_full) 1402 cur_ops->get_gp_state_full(&cookie_full); 1403 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) 1404 sync(); 1405 sync(); 1406 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), 1407 "%s: Cookie check 3 failed %pS() online %*pbl.", 1408 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1409 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), 1410 "%s: Cookie check 4 failed %pS() online %*pbl", 1411 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1412 if (dopoll || dopoll_full) 1413 cpus_read_unlock(); 1414 } 1415 1416 /* 1417 * RCU torture writer kthread. Repeatedly substitutes a new structure 1418 * for that pointed to by rcu_torture_current, freeing the old structure 1419 * after a series of grace periods (the "pipeline"). 1420 */ 1421 static int 1422 rcu_torture_writer(void *arg) 1423 { 1424 bool boot_ended; 1425 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1426 unsigned long cookie; 1427 struct rcu_gp_oldstate cookie_full; 1428 int expediting = 0; 1429 unsigned long gp_snap; 1430 unsigned long gp_snap1; 1431 struct rcu_gp_oldstate gp_snap_full; 1432 struct rcu_gp_oldstate gp_snap1_full; 1433 int i; 1434 int idx; 1435 int oldnice = task_nice(current); 1436 struct rcu_gp_oldstate *rgo = NULL; 1437 int rgo_size = 0; 1438 struct rcu_torture *rp; 1439 struct rcu_torture *old_rp; 1440 static DEFINE_TORTURE_RANDOM(rand); 1441 unsigned long stallsdone = jiffies; 1442 bool stutter_waited; 1443 unsigned long *ulo = NULL; 1444 int ulo_size = 0; 1445 1446 // If a new stall test is added, this must be adjusted. 
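	// The stallsdone calculation below pushes the writer's
	// forward-progress check out past all requested CPU and
	// grace-period-kthread stalls, so that intentional stalls are not
	// reported as writer stalls.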
1447 	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
1448 		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
1449 			      HZ * (stall_cpu_repeat + 1);
1450 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1451 	if (!can_expedite)
1452 		pr_alert("%s" TORTURE_FLAG
1453 			 " GP expediting controlled from boot/sysfs for %s.\n",
1454 			 torture_type, cur_ops->name);
1455 	if (WARN_ONCE(nsynctypes == 0,
1456 		      "%s: No update-side primitives.\n", __func__)) {
1457 		/*
1458 		 * No update-side primitives, so don't try updating.
1459 		 * The resulting test won't be testing much, hence the
1460 		 * above WARN_ONCE().
1461 		 */
1462 		rcu_torture_writer_state = RTWS_STOPPING;
1463 		torture_kthread_stopping("rcu_torture_writer");
1464 		return 0;
1465 	}
1466 	if (cur_ops->poll_active > 0) {
1467 		ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
1468 		if (!WARN_ON(!ulo))
1469 			ulo_size = cur_ops->poll_active;
1470 	}
1471 	if (cur_ops->poll_active_full > 0) {
1472 		rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
1473 		if (!WARN_ON(!rgo))
1474 			rgo_size = cur_ops->poll_active_full;
1475 	}
1476
1477 	do {
1478 		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1479 		torture_hrtimeout_us(500, 1000, &rand);
1480 		rp = rcu_torture_alloc();
1481 		if (rp == NULL)
1482 			continue;
1483 		rp->rtort_pipe_count = 0;
1484 		ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
1485 		rcu_torture_writer_state = RTWS_DELAY;
1486 		udelay(torture_random(&rand) & 0x3ff);
1487 		rcu_torture_writer_state = RTWS_REPLACE;
1488 		old_rp = rcu_dereference_check(rcu_torture_current,
1489 					       current == writer_task);
1490 		rp->rtort_mbtest = 1;
1491 		rcu_assign_pointer(rcu_torture_current, rp);
1492 		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1493 		if (old_rp) {
1494 			i = old_rp->rtort_pipe_count;
1495 			if (i > RCU_TORTURE_PIPE_LEN)
1496 				i = RCU_TORTURE_PIPE_LEN;
1497 			atomic_inc(&rcu_torture_wcount[i]);
1498 			WRITE_ONCE(old_rp->rtort_pipe_count,
1499 				   old_rp->rtort_pipe_count + 1);
1500 			ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);
1501
1502 			// Make sure readers block polled grace periods.
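			// That is, a cookie obtained from within a
			// still-running reader must not yet report
			// grace-period completion, whereas a
			// get_comp_state() cookie must always report
			// completion.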
1503 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1504 idx = cur_ops->readlock(); 1505 cookie = cur_ops->get_gp_state(); 1506 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1507 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1508 __func__, 1509 rcu_torture_writer_state_getname(), 1510 rcu_torture_writer_state, 1511 cookie, cur_ops->get_gp_state()); 1512 if (cur_ops->get_comp_state) { 1513 cookie = cur_ops->get_comp_state(); 1514 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1515 } 1516 cur_ops->readunlock(idx); 1517 } 1518 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1519 idx = cur_ops->readlock(); 1520 cur_ops->get_gp_state_full(&cookie_full); 1521 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1522 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1523 __func__, 1524 rcu_torture_writer_state_getname(), 1525 rcu_torture_writer_state, 1526 cpumask_pr_args(cpu_online_mask)); 1527 if (cur_ops->get_comp_state_full) { 1528 cur_ops->get_comp_state_full(&cookie_full); 1529 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1530 } 1531 cur_ops->readunlock(idx); 1532 } 1533 switch (synctype[torture_random(&rand) % nsynctypes]) { 1534 case RTWS_DEF_FREE: 1535 rcu_torture_writer_state = RTWS_DEF_FREE; 1536 cur_ops->deferred_free(old_rp); 1537 break; 1538 case RTWS_EXP_SYNC: 1539 rcu_torture_writer_state = RTWS_EXP_SYNC; 1540 do_rtws_sync(&rand, cur_ops->exp_sync); 1541 rcu_torture_pipe_update(old_rp); 1542 break; 1543 case RTWS_COND_GET: 1544 rcu_torture_writer_state = RTWS_COND_GET; 1545 gp_snap = cur_ops->get_gp_state(); 1546 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi, 1547 1000, &rand); 1548 rcu_torture_writer_state = RTWS_COND_SYNC; 1549 cur_ops->cond_sync(gp_snap); 1550 rcu_torture_pipe_update(old_rp); 1551 break; 1552 case RTWS_COND_GET_EXP: 1553 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1554 gp_snap = cur_ops->get_gp_state_exp(); 1555 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp, 1556 1000, &rand); 1557 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1558 cur_ops->cond_sync_exp(gp_snap); 1559 rcu_torture_pipe_update(old_rp); 1560 break; 1561 case RTWS_COND_GET_FULL: 1562 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1563 cur_ops->get_gp_state_full(&gp_snap_full); 1564 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi, 1565 1000, &rand); 1566 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1567 cur_ops->cond_sync_full(&gp_snap_full); 1568 rcu_torture_pipe_update(old_rp); 1569 break; 1570 case RTWS_COND_GET_EXP_FULL: 1571 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1572 cur_ops->get_gp_state_full(&gp_snap_full); 1573 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp, 1574 1000, &rand); 1575 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1576 cur_ops->cond_sync_exp_full(&gp_snap_full); 1577 rcu_torture_pipe_update(old_rp); 1578 break; 1579 case RTWS_POLL_GET: 1580 rcu_torture_writer_state = RTWS_POLL_GET; 1581 for (i = 0; i < ulo_size; i++) 1582 ulo[i] = cur_ops->get_comp_state(); 1583 gp_snap = cur_ops->start_gp_poll(); 1584 rcu_torture_writer_state = RTWS_POLL_WAIT; 1585 while (!cur_ops->poll_gp_state(gp_snap)) { 1586 gp_snap1 = cur_ops->get_gp_state(); 1587 for (i = 0; i < ulo_size; i++) 1588 if (cur_ops->poll_gp_state(ulo[i]) || 1589 cur_ops->same_gp_state(ulo[i], gp_snap1)) { 1590 ulo[i] = gp_snap1; 1591 break; 1592 } 1593 WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size); 1594 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1595 1000, &rand); 1596 } 1597 rcu_torture_pipe_update(old_rp); 
1598 break; 1599 case RTWS_POLL_GET_FULL: 1600 rcu_torture_writer_state = RTWS_POLL_GET_FULL; 1601 for (i = 0; i < rgo_size; i++) 1602 cur_ops->get_comp_state_full(&rgo[i]); 1603 cur_ops->start_gp_poll_full(&gp_snap_full); 1604 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1605 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1606 cur_ops->get_gp_state_full(&gp_snap1_full); 1607 for (i = 0; i < rgo_size; i++) 1608 if (cur_ops->poll_gp_state_full(&rgo[i]) || 1609 cur_ops->same_gp_state_full(&rgo[i], 1610 &gp_snap1_full)) { 1611 rgo[i] = gp_snap1_full; 1612 break; 1613 } 1614 WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size); 1615 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1616 1000, &rand); 1617 } 1618 rcu_torture_pipe_update(old_rp); 1619 break; 1620 case RTWS_POLL_GET_EXP: 1621 rcu_torture_writer_state = RTWS_POLL_GET_EXP; 1622 gp_snap = cur_ops->start_gp_poll_exp(); 1623 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; 1624 while (!cur_ops->poll_gp_state_exp(gp_snap)) 1625 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1626 1000, &rand); 1627 rcu_torture_pipe_update(old_rp); 1628 break; 1629 case RTWS_POLL_GET_EXP_FULL: 1630 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; 1631 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1632 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; 1633 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1634 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1635 1000, &rand); 1636 rcu_torture_pipe_update(old_rp); 1637 break; 1638 case RTWS_SYNC: 1639 rcu_torture_writer_state = RTWS_SYNC; 1640 do_rtws_sync(&rand, cur_ops->sync); 1641 rcu_torture_pipe_update(old_rp); 1642 break; 1643 default: 1644 WARN_ON_ONCE(1); 1645 break; 1646 } 1647 } 1648 WRITE_ONCE(rcu_torture_current_version, 1649 rcu_torture_current_version + 1); 1650 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1651 if (can_expedite && 1652 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1653 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1654 if (expediting >= 0) 1655 rcu_expedite_gp(); 1656 else 1657 rcu_unexpedite_gp(); 1658 if (++expediting > 3) 1659 expediting = -expediting; 1660 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1661 can_expedite = !rcu_gp_is_expedited() && 1662 !rcu_gp_is_normal(); 1663 } 1664 rcu_torture_writer_state = RTWS_STUTTER; 1665 boot_ended = rcu_inkernel_boot_has_ended(); 1666 stutter_waited = stutter_wait("rcu_torture_writer"); 1667 if (stutter_waited && 1668 !atomic_read(&rcu_fwd_cb_nodelay) && 1669 !cur_ops->slow_gps && 1670 !torture_must_stop() && 1671 boot_ended && 1672 time_after(jiffies, stallsdone)) 1673 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1674 if (list_empty(&rcu_tortures[i].rtort_free) && 1675 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { 1676 tracing_off(); 1677 if (cur_ops->gp_kthread_dbg) 1678 cur_ops->gp_kthread_dbg(); 1679 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1680 rcu_ftrace_dump(DUMP_ALL); 1681 } 1682 if (stutter_waited) 1683 sched_set_normal(current, oldnice); 1684 } while (!torture_must_stop()); 1685 rcu_torture_current = NULL; // Let stats task know that we are done. 1686 /* Reset expediting back to unexpedited. 
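 * rcu_expedite_gp() and rcu_unexpedite_gp() nest like a counter, so if the
 * loop above still has "expediting" levels outstanding, negate the count
 * and keep calling rcu_unexpedite_gp() until it returns to zero; otherwise
 * the system would be left with grace-period expediting forced on after
 * the writer kthread stops.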
*/ 1687 if (expediting > 0) 1688 expediting = -expediting; 1689 while (can_expedite && expediting++ < 0) 1690 rcu_unexpedite_gp(); 1691 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1692 if (!can_expedite) 1693 pr_alert("%s" TORTURE_FLAG 1694 " Dynamic grace-period expediting was disabled.\n", 1695 torture_type); 1696 kfree(ulo); 1697 kfree(rgo); 1698 rcu_torture_writer_state = RTWS_STOPPING; 1699 torture_kthread_stopping("rcu_torture_writer"); 1700 return 0; 1701 } 1702 1703 /* 1704 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1705 * delay between calls. 1706 */ 1707 static int 1708 rcu_torture_fakewriter(void *arg) 1709 { 1710 unsigned long gp_snap; 1711 struct rcu_gp_oldstate gp_snap_full; 1712 DEFINE_TORTURE_RANDOM(rand); 1713 1714 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1715 set_user_nice(current, MAX_NICE); 1716 1717 if (WARN_ONCE(nsynctypes == 0, 1718 "%s: No update-side primitives.\n", __func__)) { 1719 /* 1720 * No updates primitives, so don't try updating. 1721 * The resulting test won't be testing much, hence the 1722 * above WARN_ONCE(). 1723 */ 1724 torture_kthread_stopping("rcu_torture_fakewriter"); 1725 return 0; 1726 } 1727 1728 do { 1729 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1730 if (cur_ops->cb_barrier != NULL && 1731 torture_random(&rand) % (nfakewriters * 8) == 0) { 1732 cur_ops->cb_barrier(); 1733 } else { 1734 switch (synctype[torture_random(&rand) % nsynctypes]) { 1735 case RTWS_DEF_FREE: 1736 break; 1737 case RTWS_EXP_SYNC: 1738 cur_ops->exp_sync(); 1739 break; 1740 case RTWS_COND_GET: 1741 gp_snap = cur_ops->get_gp_state(); 1742 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1743 cur_ops->cond_sync(gp_snap); 1744 break; 1745 case RTWS_COND_GET_EXP: 1746 gp_snap = cur_ops->get_gp_state_exp(); 1747 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1748 cur_ops->cond_sync_exp(gp_snap); 1749 break; 1750 case RTWS_COND_GET_FULL: 1751 cur_ops->get_gp_state_full(&gp_snap_full); 1752 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1753 cur_ops->cond_sync_full(&gp_snap_full); 1754 break; 1755 case RTWS_COND_GET_EXP_FULL: 1756 cur_ops->get_gp_state_full(&gp_snap_full); 1757 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1758 cur_ops->cond_sync_exp_full(&gp_snap_full); 1759 break; 1760 case RTWS_POLL_GET: 1761 if (cur_ops->start_poll_irqsoff) 1762 local_irq_disable(); 1763 gp_snap = cur_ops->start_gp_poll(); 1764 if (cur_ops->start_poll_irqsoff) 1765 local_irq_enable(); 1766 while (!cur_ops->poll_gp_state(gp_snap)) { 1767 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1768 &rand); 1769 } 1770 break; 1771 case RTWS_POLL_GET_FULL: 1772 if (cur_ops->start_poll_irqsoff) 1773 local_irq_disable(); 1774 cur_ops->start_gp_poll_full(&gp_snap_full); 1775 if (cur_ops->start_poll_irqsoff) 1776 local_irq_enable(); 1777 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1778 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1779 &rand); 1780 } 1781 break; 1782 case RTWS_POLL_GET_EXP: 1783 gp_snap = cur_ops->start_gp_poll_exp(); 1784 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1785 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1786 &rand); 1787 } 1788 break; 1789 case RTWS_POLL_GET_EXP_FULL: 1790 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1791 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1792 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1793 &rand); 1794 } 1795 break; 1796 case RTWS_SYNC: 1797 cur_ops->sync(); 
1798 break; 1799 default: 1800 WARN_ON_ONCE(1); 1801 break; 1802 } 1803 } 1804 stutter_wait("rcu_torture_fakewriter"); 1805 } while (!torture_must_stop()); 1806 1807 torture_kthread_stopping("rcu_torture_fakewriter"); 1808 return 0; 1809 } 1810 1811 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1812 { 1813 kfree(rhp); 1814 } 1815 1816 // Set up and carry out testing of RCU's global memory ordering 1817 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1818 struct torture_random_state *trsp) 1819 { 1820 unsigned long loops; 1821 int noc = torture_num_online_cpus(); 1822 int rdrchked; 1823 int rdrchker; 1824 struct rcu_torture_reader_check *rtrcp; // Me. 1825 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1826 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1827 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1828 1829 if (myid < 0) 1830 return; // Don't try this from timer handlers. 1831 1832 // Increment my counter. 1833 rtrcp = &rcu_torture_reader_mbchk[myid]; 1834 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1835 1836 // Attempt to assign someone else some checking work. 1837 rdrchked = torture_random(trsp) % nrealreaders; 1838 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1839 rdrchker = torture_random(trsp) % nrealreaders; 1840 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1841 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1842 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1843 !READ_ONCE(rtp->rtort_chkp) && 1844 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1845 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1846 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1847 rtrcp->rtc_chkrdr = rdrchked; 1848 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1849 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1850 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1851 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1852 } 1853 1854 // If assigned some completed work, do it! 1855 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1856 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1857 return; // No work or work not yet ready. 1858 rdrchked = rtrcp_assigner->rtc_chkrdr; 1859 if (WARN_ON_ONCE(rdrchked < 0)) 1860 return; 1861 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1862 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1863 atomic_inc(&n_rcu_torture_mbchk_tries); 1864 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1865 atomic_inc(&n_rcu_torture_mbchk_fail); 1866 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1867 rtrcp_assigner->rtc_ready = 0; 1868 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1869 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1870 } 1871 1872 // Verify the specified RCUTORTURE_RDR* state. 
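// The helper below cross-checks rcutorture's notion of the reader state
// against what the rest of the kernel reports: the _BH/_RBH bits should be
// visible in preempt_count() & SOFTIRQ_MASK, the _PREEMPT/_SCHED bits in
// preempt_count() & PREEMPT_MASK, the _IRQ bit in irqs_disabled(), and the
// _RCU_1/_RCU_2 bits in cur_ops->readlock_nesting().  It is a no-op unless
// CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE=y.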
1873 #define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count() 1874 static void rcutorture_one_extend_check(char *s, int curstate, int new, int old, bool insoftirq) 1875 { 1876 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE)) 1877 return; 1878 1879 WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled(), ROEC_ARGS); 1880 WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS); 1881 1882 // If CONFIG_PREEMPT_COUNT=n, further checks are unreliable. 1883 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 1884 return; 1885 1886 WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 1887 !(preempt_count() & SOFTIRQ_MASK), ROEC_ARGS); 1888 WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) && 1889 !(preempt_count() & PREEMPT_MASK), ROEC_ARGS); 1890 WARN_ONCE(cur_ops->readlock_nesting && 1891 (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) && 1892 cur_ops->readlock_nesting() == 0, ROEC_ARGS); 1893 1894 // Timer handlers have all sorts of stuff disabled, so ignore 1895 // unintended disabling. 1896 if (insoftirq) 1897 return; 1898 1899 WARN_ONCE(cur_ops->extendables && 1900 !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 1901 (preempt_count() & SOFTIRQ_MASK), ROEC_ARGS); 1902 WARN_ONCE(cur_ops->extendables && 1903 !(curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) && 1904 (preempt_count() & PREEMPT_MASK), ROEC_ARGS); 1905 WARN_ONCE(cur_ops->readlock_nesting && 1906 !(curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) && 1907 cur_ops->readlock_nesting() > 0, ROEC_ARGS); 1908 } 1909 1910 /* 1911 * Do one extension of an RCU read-side critical section using the 1912 * current reader state in readstate (set to zero for initial entry 1913 * to extended critical section), set the new state as specified by 1914 * newstate (set to zero for final exit from extended critical section), 1915 * and random-number-generator state in trsp. If this is neither the 1916 * beginning nor the end of the critical section and if there was actually a 1917 * change, do a ->read_delay(). 1918 */ 1919 static void rcutorture_one_extend(int *readstate, int newstate, bool insoftirq, 1920 struct torture_random_state *trsp, 1921 struct rt_read_seg *rtrsp) 1922 { 1923 bool first; 1924 unsigned long flags; 1925 int idxnew1 = -1; 1926 int idxnew2 = -1; 1927 int idxold1 = *readstate; 1928 int idxold2 = idxold1; 1929 int statesnew = ~*readstate & newstate; 1930 int statesold = *readstate & ~newstate; 1931 1932 first = idxold1 == 0; 1933 WARN_ON_ONCE(idxold2 < 0); 1934 WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS); 1935 rcutorture_one_extend_check("before change", idxold1, statesnew, statesold, insoftirq); 1936 rtrsp->rt_readstate = newstate; 1937 1938 /* First, put new protection in place to avoid critical-section gap.
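 * Every protection named in statesnew is acquired before any protection
 * named in statesold is dropped, so the reader is never momentarily
 * unprotected while changing state.  For example, when switching from
 * RCUTORTURE_RDR_RCU_1 to RCUTORTURE_RDR_RBH, rcu_read_lock_bh() runs
 * before cur_ops->readunlock() does.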
*/ 1939 if (statesnew & RCUTORTURE_RDR_BH) 1940 local_bh_disable(); 1941 if (statesnew & RCUTORTURE_RDR_RBH) 1942 rcu_read_lock_bh(); 1943 if (statesnew & RCUTORTURE_RDR_IRQ) 1944 local_irq_disable(); 1945 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1946 preempt_disable(); 1947 if (statesnew & RCUTORTURE_RDR_SCHED) 1948 rcu_read_lock_sched(); 1949 if (statesnew & RCUTORTURE_RDR_RCU_1) 1950 idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; 1951 if (statesnew & RCUTORTURE_RDR_RCU_2) 1952 idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2; 1953 1954 // Complain unless both the old and the new protection are in place. 1955 rcutorture_one_extend_check("during change", 1956 idxold1 | statesnew, statesnew, statesold, insoftirq); 1957 1958 // Sample CPU under both sets of protections to reduce confusion. 1959 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 1960 int cpu = raw_smp_processor_id(); 1961 rtrsp->rt_cpu = cpu; 1962 if (!first) { 1963 rtrsp[-1].rt_end_cpu = cpu; 1964 if (cur_ops->reader_blocked) 1965 rtrsp[-1].rt_preempted = cur_ops->reader_blocked(); 1966 } 1967 } 1968 1969 /* 1970 * Next, remove old protection, in decreasing order of strength 1971 * to avoid unlock paths that aren't safe in the stronger 1972 * context. Namely: BH cannot be enabled with disabled interrupts. 1973 * Additionally, PREEMPT_RT requires that BH is enabled in preemptible 1974 * context. 1975 */ 1976 if (statesold & RCUTORTURE_RDR_IRQ) 1977 local_irq_enable(); 1978 if (statesold & RCUTORTURE_RDR_PREEMPT) 1979 preempt_enable(); 1980 if (statesold & RCUTORTURE_RDR_SCHED) 1981 rcu_read_unlock_sched(); 1982 if (statesold & RCUTORTURE_RDR_BH) 1983 local_bh_enable(); 1984 if (statesold & RCUTORTURE_RDR_RBH) 1985 rcu_read_unlock_bh(); 1986 if (statesold & RCUTORTURE_RDR_RCU_2) { 1987 cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2); 1988 WARN_ON_ONCE(idxnew2 != -1); 1989 idxold2 = 0; 1990 } 1991 if (statesold & RCUTORTURE_RDR_RCU_1) { 1992 bool lockit; 1993 1994 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 1995 if (lockit) 1996 raw_spin_lock_irqsave(&current->pi_lock, flags); 1997 cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); 1998 WARN_ON_ONCE(idxnew1 != -1); 1999 idxold1 = 0; 2000 if (lockit) 2001 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 2002 } 2003 2004 /* Delay if neither beginning nor end and there was a change. */ 2005 if ((statesnew || statesold) && *readstate && newstate) 2006 cur_ops->read_delay(trsp, rtrsp); 2007 2008 /* Update the reader state. */ 2009 if (idxnew1 == -1) 2010 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 2011 WARN_ON_ONCE(idxnew1 < 0); 2012 if (idxnew2 == -1) 2013 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 2014 WARN_ON_ONCE(idxnew2 < 0); 2015 *readstate = idxnew1 | idxnew2 | newstate; 2016 WARN_ON_ONCE(*readstate < 0); 2017 if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS)) 2018 pr_info("Unexpected readstate value of %#x\n", *readstate); 2019 rcutorture_one_extend_check("after change", *readstate, statesnew, statesold, insoftirq); 2020 } 2021 2022 /* Return the biggest extendables mask given current RCU and boot parameters.
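 * This is the intersection of the extendables= module parameter,
 * RCUTORTURE_MAX_EXTEND, and the bits advertised by the flavor under test
 * in cur_ops->extendables, with the RCUTORTURE_RDR_RCU_1/_2 bits always
 * permitted.  For example (hypothetical values), extendables=0x15 against
 * an ->extendables of 0x1f would allow only the BH, preempt, and sched
 * extensions, plus the two RCU-reader bits.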
*/ 2023 static int rcutorture_extend_mask_max(void) 2024 { 2025 int mask; 2026 2027 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 2028 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 2029 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2030 return mask; 2031 } 2032 2033 /* Return a random protection state mask, but with at least one bit set. */ 2034 static int 2035 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 2036 { 2037 int mask = rcutorture_extend_mask_max(); 2038 unsigned long randmask1 = torture_random(trsp); 2039 unsigned long randmask2 = randmask1 >> 3; 2040 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2041 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 2042 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 2043 2044 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits. 2045 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 2046 if (!(randmask1 & 0x7)) 2047 mask = mask & randmask2; 2048 else 2049 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 2050 2051 // Can't have nested RCU reader without outer RCU reader. 2052 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 2053 if (oldmask & RCUTORTURE_RDR_RCU_1) 2054 mask &= ~RCUTORTURE_RDR_RCU_2; 2055 else 2056 mask |= RCUTORTURE_RDR_RCU_1; 2057 } 2058 2059 /* 2060 * Can't enable bh w/irq disabled. 2061 */ 2062 if (mask & RCUTORTURE_RDR_IRQ) 2063 mask |= oldmask & bhs; 2064 2065 /* 2066 * Ideally these sequences would be detected in debug builds 2067 * (regardless of RT), but until then don't stop testing 2068 * them on non-RT. 2069 */ 2070 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 2071 /* Can't modify BH in atomic context */ 2072 if (oldmask & preempts_irq) 2073 mask &= ~bhs; 2074 if ((oldmask | mask) & preempts_irq) 2075 mask |= oldmask & bhs; 2076 } 2077 2078 return mask ?: RCUTORTURE_RDR_RCU_1; 2079 } 2080 2081 /* 2082 * Do a randomly selected number of extensions of an existing RCU read-side 2083 * critical section. 2084 */ 2085 static struct rt_read_seg * 2086 rcutorture_loop_extend(int *readstate, bool insoftirq, struct torture_random_state *trsp, 2087 struct rt_read_seg *rtrsp) 2088 { 2089 int i; 2090 int j; 2091 int mask = rcutorture_extend_mask_max(); 2092 2093 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 2094 if (!((mask - 1) & mask)) 2095 return rtrsp; /* Current RCU reader not extendable. */ 2096 /* Bias towards larger numbers of loops. */ 2097 i = torture_random(trsp); 2098 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 2099 for (j = 0; j < i; j++) { 2100 mask = rcutorture_extend_mask(*readstate, trsp); 2101 rcutorture_one_extend(readstate, mask, insoftirq, trsp, &rtrsp[j]); 2102 } 2103 return &rtrsp[j]; 2104 } 2105 2106 /* 2107 * Do one read-side critical section, returning false if there was 2108 * no data to read. Can be invoked both from process context and 2109 * from a timer handler. 
2110 */ 2111 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 2112 { 2113 bool checkpolling = !(torture_random(trsp) & 0xfff); 2114 unsigned long cookie; 2115 struct rcu_gp_oldstate cookie_full; 2116 int i; 2117 unsigned long started; 2118 unsigned long completed; 2119 int newstate; 2120 struct rcu_torture *p; 2121 int pipe_count; 2122 bool preempted = false; 2123 int readstate = 0; 2124 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 2125 struct rt_read_seg *rtrsp = &rtseg[0]; 2126 struct rt_read_seg *rtrsp1; 2127 unsigned long long ts; 2128 2129 WARN_ON_ONCE(!rcu_is_watching()); 2130 newstate = rcutorture_extend_mask(readstate, trsp); 2131 rcutorture_one_extend(&readstate, newstate, myid < 0, trsp, rtrsp++); 2132 if (checkpolling) { 2133 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2134 cookie = cur_ops->get_gp_state(); 2135 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2136 cur_ops->get_gp_state_full(&cookie_full); 2137 } 2138 started = cur_ops->get_gp_seq(); 2139 ts = rcu_trace_clock_local(); 2140 p = rcu_dereference_check(rcu_torture_current, 2141 !cur_ops->readlock_held || cur_ops->readlock_held()); 2142 if (p == NULL) { 2143 /* Wait for rcu_torture_writer to get underway */ 2144 rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp); 2145 return false; 2146 } 2147 if (p->rtort_mbtest == 0) 2148 atomic_inc(&n_rcu_torture_mberror); 2149 rcu_torture_reader_do_mbchk(myid, p, trsp); 2150 rtrsp = rcutorture_loop_extend(&readstate, myid < 0, trsp, rtrsp); 2151 preempt_disable(); 2152 pipe_count = READ_ONCE(p->rtort_pipe_count); 2153 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2154 // Should not happen in a correct RCU implementation, 2155 // happens quite often for torture_type=busted. 2156 pipe_count = RCU_TORTURE_PIPE_LEN; 2157 } 2158 completed = cur_ops->get_gp_seq(); 2159 if (pipe_count > 1) { 2160 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 2161 ts, started, completed); 2162 rcu_ftrace_dump(DUMP_ALL); 2163 } 2164 __this_cpu_inc(rcu_torture_count[pipe_count]); 2165 completed = rcutorture_seq_diff(completed, started); 2166 if (completed > RCU_TORTURE_PIPE_LEN) { 2167 /* Should not happen, but... */ 2168 completed = RCU_TORTURE_PIPE_LEN; 2169 } 2170 __this_cpu_inc(rcu_torture_batch[completed]); 2171 preempt_enable(); 2172 if (checkpolling) { 2173 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2174 WARN_ONCE(cur_ops->poll_gp_state(cookie), 2175 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2176 __func__, 2177 rcu_torture_writer_state_getname(), 2178 rcu_torture_writer_state, 2179 cookie, cur_ops->get_gp_state()); 2180 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2181 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 2182 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2183 __func__, 2184 rcu_torture_writer_state_getname(), 2185 rcu_torture_writer_state, 2186 cpumask_pr_args(cpu_online_mask)); 2187 } 2188 if (cur_ops->reader_blocked) 2189 preempted = cur_ops->reader_blocked(); 2190 rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp); 2191 WARN_ON_ONCE(readstate); 2192 // This next splat is expected behavior if leakpointer, especially 2193 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2194 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 2195 2196 /* If error or close call, record the sequence of reader protections. 
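 * Only the first such reader records anything: the xchg() on
 * err_segs_recorded claims the err_segs[] array, which then holds the
 * per-segment reader state (and, when CPU logging is enabled, the CPUs
 * involved) so that it can be printed when the test is torn down.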
*/ 2197 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2198 i = 0; 2199 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 2200 err_segs[i++] = *rtrsp1; 2201 rt_read_nsegs = i; 2202 rt_read_preempted = preempted; 2203 } 2204 2205 return true; 2206 } 2207 2208 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2209 2210 /* 2211 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2212 * incrementing the corresponding element of the pipeline array. The 2213 * counter in the element should never be greater than 1, otherwise, the 2214 * RCU implementation is broken. 2215 */ 2216 static void rcu_torture_timer(struct timer_list *unused) 2217 { 2218 atomic_long_inc(&n_rcu_torture_timers); 2219 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2220 2221 /* Test call_rcu() invocation from interrupt handler. */ 2222 if (cur_ops->call) { 2223 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2224 2225 if (rhp) 2226 cur_ops->call(rhp, rcu_torture_timer_cb); 2227 } 2228 } 2229 2230 /* 2231 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2232 * incrementing the corresponding element of the pipeline array. The 2233 * counter in the element should never be greater than 1, otherwise, the 2234 * RCU implementation is broken. 2235 */ 2236 static int 2237 rcu_torture_reader(void *arg) 2238 { 2239 unsigned long lastsleep = jiffies; 2240 long myid = (long)arg; 2241 int mynumonline = myid; 2242 DEFINE_TORTURE_RANDOM(rand); 2243 struct timer_list t; 2244 2245 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2246 set_user_nice(current, MAX_NICE); 2247 if (irqreader && cur_ops->irq_capable) 2248 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2249 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2250 do { 2251 if (irqreader && cur_ops->irq_capable) { 2252 if (!timer_pending(&t)) 2253 mod_timer(&t, jiffies + 1); 2254 } 2255 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2256 schedule_timeout_interruptible(HZ); 2257 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2258 torture_hrtimeout_us(500, 1000, &rand); 2259 lastsleep = jiffies + 10; 2260 } 2261 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2262 schedule_timeout_interruptible(HZ / 5); 2263 stutter_wait("rcu_torture_reader"); 2264 } while (!torture_must_stop()); 2265 if (irqreader && cur_ops->irq_capable) { 2266 del_timer_sync(&t); 2267 destroy_timer_on_stack(&t); 2268 } 2269 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2270 torture_kthread_stopping("rcu_torture_reader"); 2271 return 0; 2272 } 2273 2274 /* 2275 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2276 * increase race probabilities and fuzzes the interval between toggling. 
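 * Each pass picks a random CPU and randomly either offloads or de-offloads
 * its callbacks via rcu_nocb_cpu_offload()/rcu_nocb_cpu_deoffload(), then
 * sleeps for the nocbs_toggle interval plus up to one eighth of it as fuzz.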
2277 */ 2278 static int rcu_nocb_toggle(void *arg) 2279 { 2280 int cpu; 2281 int maxcpu = -1; 2282 int oldnice = task_nice(current); 2283 long r; 2284 DEFINE_TORTURE_RANDOM(rand); 2285 ktime_t toggle_delay; 2286 unsigned long toggle_fuzz; 2287 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2288 2289 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2290 while (!rcu_inkernel_boot_has_ended()) 2291 schedule_timeout_interruptible(HZ / 10); 2292 for_each_possible_cpu(cpu) 2293 maxcpu = cpu; 2294 WARN_ON(maxcpu < 0); 2295 if (toggle_interval > ULONG_MAX) 2296 toggle_fuzz = ULONG_MAX >> 3; 2297 else 2298 toggle_fuzz = toggle_interval >> 3; 2299 if (toggle_fuzz <= 0) 2300 toggle_fuzz = NSEC_PER_USEC; 2301 do { 2302 r = torture_random(&rand); 2303 cpu = (r >> 1) % (maxcpu + 1); 2304 if (r & 0x1) { 2305 rcu_nocb_cpu_offload(cpu); 2306 atomic_long_inc(&n_nocb_offload); 2307 } else { 2308 rcu_nocb_cpu_deoffload(cpu); 2309 atomic_long_inc(&n_nocb_deoffload); 2310 } 2311 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2312 set_current_state(TASK_INTERRUPTIBLE); 2313 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2314 if (stutter_wait("rcu_nocb_toggle")) 2315 sched_set_normal(current, oldnice); 2316 } while (!torture_must_stop()); 2317 torture_kthread_stopping("rcu_nocb_toggle"); 2318 return 0; 2319 } 2320 2321 /* 2322 * Print torture statistics. Caller must ensure that there is only 2323 * one call to this function at a given time!!! This is normally 2324 * accomplished by relying on the module system to only have one copy 2325 * of the module loaded, and then by giving the rcu_torture_stats 2326 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2327 * thread is not running). 2328 */ 2329 static void 2330 rcu_torture_stats_print(void) 2331 { 2332 int cpu; 2333 int i; 2334 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2335 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2336 struct rcu_torture *rtcp; 2337 static unsigned long rtcv_snap = ULONG_MAX; 2338 static bool splatted; 2339 struct task_struct *wtp; 2340 2341 for_each_possible_cpu(cpu) { 2342 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2343 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2344 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2345 } 2346 } 2347 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2348 if (pipesummary[i] != 0) 2349 break; 2350 } 2351 2352 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2353 rtcp = rcu_access_pointer(rcu_torture_current); 2354 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2355 rtcp, 2356 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2357 rcu_torture_current_version, 2358 list_empty(&rcu_torture_freelist), 2359 atomic_read(&n_rcu_torture_alloc), 2360 atomic_read(&n_rcu_torture_alloc_fail), 2361 atomic_read(&n_rcu_torture_free)); 2362 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2363 atomic_read(&n_rcu_torture_mberror), 2364 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2365 n_rcu_torture_barrier_error, 2366 n_rcu_torture_boost_ktrerror); 2367 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2368 n_rcu_torture_boost_failure, 2369 n_rcu_torture_boosts, 2370 atomic_long_read(&n_rcu_torture_timers)); 2371 torture_onoff_stats(); 2372 pr_cont("barrier: %ld/%ld:%ld ", 2373 data_race(n_barrier_successes), 2374 data_race(n_barrier_attempts), 2375 data_race(n_rcu_torture_barrier_error)); 2376 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2377 pr_cont("nocb-toggles: %ld:%ld\n", 2378 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2379 2380 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2381 if (atomic_read(&n_rcu_torture_mberror) || 2382 atomic_read(&n_rcu_torture_mbchk_fail) || 2383 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2384 n_rcu_torture_boost_failure || i > 1) { 2385 pr_cont("%s", "!!! "); 2386 atomic_inc(&n_rcu_torture_error); 2387 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2388 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2389 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2390 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2391 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2392 WARN_ON_ONCE(i > 1); // Too-short grace period 2393 } 2394 pr_cont("Reader Pipe: "); 2395 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2396 pr_cont(" %ld", pipesummary[i]); 2397 pr_cont("\n"); 2398 2399 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2400 pr_cont("Reader Batch: "); 2401 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2402 pr_cont(" %ld", batchsummary[i]); 2403 pr_cont("\n"); 2404 2405 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2406 pr_cont("Free-Block Circulation: "); 2407 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2408 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2409 } 2410 pr_cont("\n"); 2411 2412 if (cur_ops->stats) 2413 cur_ops->stats(); 2414 if (rtcv_snap == rcu_torture_current_version && 2415 rcu_access_pointer(rcu_torture_current) && 2416 !rcu_stall_is_suppressed()) { 2417 int __maybe_unused flags = 0; 2418 unsigned long __maybe_unused gp_seq = 0; 2419 2420 if (cur_ops->get_gp_data) 2421 cur_ops->get_gp_data(&flags, &gp_seq); 2422 wtp = READ_ONCE(writer_task); 2423 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2424 rcu_torture_writer_state_getname(), 2425 rcu_torture_writer_state, gp_seq, flags, 2426 wtp == NULL ? ~0U : wtp->__state, 2427 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2428 if (!splatted && wtp) { 2429 sched_show_task(wtp); 2430 splatted = true; 2431 } 2432 if (cur_ops->gp_kthread_dbg) 2433 cur_ops->gp_kthread_dbg(); 2434 rcu_ftrace_dump(DUMP_ALL); 2435 } 2436 rtcv_snap = rcu_torture_current_version; 2437 } 2438 2439 /* 2440 * Periodically prints torture statistics, if periodic statistics printing 2441 * was specified via the stat_interval module parameter. 
2442 */ 2443 static int 2444 rcu_torture_stats(void *arg) 2445 { 2446 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2447 do { 2448 schedule_timeout_interruptible(stat_interval * HZ); 2449 rcu_torture_stats_print(); 2450 torture_shutdown_absorb("rcu_torture_stats"); 2451 } while (!torture_must_stop()); 2452 torture_kthread_stopping("rcu_torture_stats"); 2453 return 0; 2454 } 2455 2456 /* Test mem_dump_obj() and friends. */ 2457 static void rcu_torture_mem_dump_obj(void) 2458 { 2459 struct rcu_head *rhp; 2460 struct kmem_cache *kcp; 2461 static int z; 2462 2463 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2464 if (WARN_ON_ONCE(!kcp)) 2465 return; 2466 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2467 if (WARN_ON_ONCE(!rhp)) { 2468 kmem_cache_destroy(kcp); 2469 return; 2470 } 2471 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2472 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2473 mem_dump_obj(ZERO_SIZE_PTR); 2474 pr_alert("mem_dump_obj(NULL):"); 2475 mem_dump_obj(NULL); 2476 pr_alert("mem_dump_obj(%px):", &rhp); 2477 mem_dump_obj(&rhp); 2478 pr_alert("mem_dump_obj(%px):", rhp); 2479 mem_dump_obj(rhp); 2480 pr_alert("mem_dump_obj(%px):", &rhp->func); 2481 mem_dump_obj(&rhp->func); 2482 pr_alert("mem_dump_obj(%px):", &z); 2483 mem_dump_obj(&z); 2484 kmem_cache_free(kcp, rhp); 2485 kmem_cache_destroy(kcp); 2486 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2487 if (WARN_ON_ONCE(!rhp)) 2488 return; 2489 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2490 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2491 mem_dump_obj(rhp); 2492 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2493 mem_dump_obj(&rhp->func); 2494 kfree(rhp); 2495 rhp = vmalloc(4096); 2496 if (WARN_ON_ONCE(!rhp)) 2497 return; 2498 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2499 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2500 mem_dump_obj(rhp); 2501 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2502 mem_dump_obj(&rhp->func); 2503 vfree(rhp); 2504 } 2505 2506 static void 2507 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2508 { 2509 pr_alert("%s" TORTURE_FLAG 2510 "--- %s: nreaders=%d nfakewriters=%d " 2511 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2512 "shuffle_interval=%d stutter=%d irqreader=%d " 2513 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2514 "test_boost=%d/%d test_boost_interval=%d " 2515 "test_boost_duration=%d shutdown_secs=%d " 2516 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2517 "stall_cpu_block=%d stall_cpu_repeat=%d " 2518 "n_barrier_cbs=%d " 2519 "onoff_interval=%d onoff_holdoff=%d " 2520 "read_exit_delay=%d read_exit_burst=%d " 2521 "reader_flavor=%x " 2522 "nocbs_nthreads=%d nocbs_toggle=%d " 2523 "test_nmis=%d " 2524 "preempt_duration=%d preempt_interval=%d\n", 2525 torture_type, tag, nrealreaders, nfakewriters, 2526 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2527 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2528 test_boost, cur_ops->can_boost, 2529 test_boost_interval, test_boost_duration, shutdown_secs, 2530 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2531 stall_cpu_block, stall_cpu_repeat, 2532 n_barrier_cbs, 2533 onoff_interval, onoff_holdoff, 2534 read_exit_delay, read_exit_burst, 2535 reader_flavor, 2536 nocbs_nthreads, nocbs_toggle, 2537 test_nmis, 2538 
preempt_duration, preempt_interval); 2539 } 2540 2541 static int rcutorture_booster_cleanup(unsigned int cpu) 2542 { 2543 struct task_struct *t; 2544 2545 if (boost_tasks[cpu] == NULL) 2546 return 0; 2547 mutex_lock(&boost_mutex); 2548 t = boost_tasks[cpu]; 2549 boost_tasks[cpu] = NULL; 2550 rcu_torture_enable_rt_throttle(); 2551 mutex_unlock(&boost_mutex); 2552 2553 /* This must be outside of the mutex, otherwise deadlock! */ 2554 torture_stop_kthread(rcu_torture_boost, t); 2555 return 0; 2556 } 2557 2558 static int rcutorture_booster_init(unsigned int cpu) 2559 { 2560 int retval; 2561 2562 if (boost_tasks[cpu] != NULL) 2563 return 0; /* Already created, nothing more to do. */ 2564 2565 // Testing RCU priority boosting requires rcutorture do 2566 // some serious abuse. Counter this by running ksoftirqd 2567 // at higher priority. 2568 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2569 struct sched_param sp; 2570 struct task_struct *t; 2571 2572 t = per_cpu(ksoftirqd, cpu); 2573 WARN_ON_ONCE(!t); 2574 sp.sched_priority = 2; 2575 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2576 #ifdef CONFIG_IRQ_FORCED_THREADING 2577 if (force_irqthreads()) { 2578 t = per_cpu(ktimerd, cpu); 2579 WARN_ON_ONCE(!t); 2580 sp.sched_priority = 2; 2581 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2582 } 2583 #endif 2584 } 2585 2586 /* Don't allow time recalculation while creating a new task. */ 2587 mutex_lock(&boost_mutex); 2588 rcu_torture_disable_rt_throttle(); 2589 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2590 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2591 cpu, "rcu_torture_boost_%u"); 2592 if (IS_ERR(boost_tasks[cpu])) { 2593 retval = PTR_ERR(boost_tasks[cpu]); 2594 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2595 n_rcu_torture_boost_ktrerror++; 2596 boost_tasks[cpu] = NULL; 2597 mutex_unlock(&boost_mutex); 2598 return retval; 2599 } 2600 mutex_unlock(&boost_mutex); 2601 return 0; 2602 } 2603 2604 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 2605 { 2606 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 2607 return NOTIFY_OK; 2608 } 2609 2610 static struct notifier_block rcu_torture_stall_block = { 2611 .notifier_call = rcu_torture_stall_nf, 2612 }; 2613 2614 /* 2615 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2616 * induces a CPU stall for the time specified by stall_cpu. If a new 2617 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 2618 */ 2619 static void rcu_torture_stall_one(int rep, int irqsoff) 2620 { 2621 int idx; 2622 unsigned long stop_at; 2623 2624 if (stall_cpu_holdoff > 0) { 2625 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2626 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2627 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2628 } 2629 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2630 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2631 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2632 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2633 if (kthread_should_stop()) 2634 break; 2635 schedule_timeout_uninterruptible(HZ); 2636 } 2637 } 2638 if (!kthread_should_stop() && stall_cpu > 0) { 2639 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2640 stop_at = ktime_get_seconds() + stall_cpu; 2641 /* RCU CPU stall is expected behavior in following code. 
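 * The reader lock is held across the entire episode.  With
 * stall_cpu_irqsoff, interrupts are disabled; otherwise preemption is
 * disabled unless stall_cpu_block is set.  The loop then spins (or, for
 * stall_cpu_block, repeatedly blocks or preempt_schedule()s) until
 * stall_cpu seconds have elapsed, optionally petting the soft-lockup
 * watchdog when stall_no_softlockup is set.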
*/ 2642 idx = cur_ops->readlock(); 2643 if (irqsoff) 2644 local_irq_disable(); 2645 else if (!stall_cpu_block) 2646 preempt_disable(); 2647 pr_alert("%s start stall episode %d on CPU %d.\n", 2648 __func__, rep + 1, raw_smp_processor_id()); 2649 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && 2650 !kthread_should_stop()) 2651 if (stall_cpu_block) { 2652 #ifdef CONFIG_PREEMPTION 2653 preempt_schedule(); 2654 #else 2655 schedule_timeout_uninterruptible(HZ); 2656 #endif 2657 } else if (stall_no_softlockup) { 2658 touch_softlockup_watchdog(); 2659 } 2660 if (irqsoff) 2661 local_irq_enable(); 2662 else if (!stall_cpu_block) 2663 preempt_enable(); 2664 cur_ops->readunlock(idx); 2665 } 2666 } 2667 2668 /* 2669 * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many 2670 * additional times as specified by the stall_cpu_repeat module parameter. 2671 * Note that stall_cpu_irqsoff is ignored on the second and subsequent 2672 * stall. 2673 */ 2674 static int rcu_torture_stall(void *args) 2675 { 2676 int i; 2677 int repeat = stall_cpu_repeat; 2678 int ret; 2679 2680 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2681 if (repeat < 0) { 2682 repeat = 0; 2683 WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)); 2684 } 2685 if (rcu_cpu_stall_notifiers) { 2686 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 2687 if (ret) 2688 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 2689 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 2690 } 2691 for (i = 0; i <= repeat; i++) { 2692 if (kthread_should_stop()) 2693 break; 2694 rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0); 2695 } 2696 pr_alert("%s end.\n", __func__); 2697 if (rcu_cpu_stall_notifiers && !ret) { 2698 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 2699 if (ret) 2700 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 2701 } 2702 torture_shutdown_absorb("rcu_torture_stall"); 2703 while (!kthread_should_stop()) 2704 schedule_timeout_interruptible(10 * HZ); 2705 return 0; 2706 } 2707 2708 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2709 static int __init rcu_torture_stall_init(void) 2710 { 2711 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2712 return 0; 2713 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2714 } 2715 2716 /* State structure for forward-progress self-propagating RCU callback. */ 2717 struct fwd_cb_state { 2718 struct rcu_head rh; 2719 int stop; 2720 }; 2721 2722 /* 2723 * Forward-progress self-propagating RCU callback function. Because 2724 * callbacks run from softirq, this function is an implicit RCU read-side 2725 * critical section. 2726 */ 2727 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2728 { 2729 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2730 2731 if (READ_ONCE(fcsp->stop)) { 2732 WRITE_ONCE(fcsp->stop, 2); 2733 return; 2734 } 2735 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2736 } 2737 2738 /* State for continuous-flood RCU callbacks. */ 2739 struct rcu_fwd_cb { 2740 struct rcu_head rh; 2741 struct rcu_fwd_cb *rfc_next; 2742 struct rcu_fwd *rfc_rfp; 2743 int rfc_gps; 2744 }; 2745 2746 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2747 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2748 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2749 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
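 * Each bucket therefore covers HZ / FWD_CBS_HIST_DIV jiffies (100 ms), so
 * N_LAUNDERS_HIST below works out to 2 * 8 * FWD_CBS_HIST_DIV = 160
 * buckets, enough to span twice the MAX_FWD_CB_JIFFIES test duration.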
*/ 2750 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2751 2752 struct rcu_launder_hist { 2753 long n_launders; 2754 unsigned long launder_gp_seq; 2755 }; 2756 2757 struct rcu_fwd { 2758 spinlock_t rcu_fwd_lock; 2759 struct rcu_fwd_cb *rcu_fwd_cb_head; 2760 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2761 long n_launders_cb; 2762 unsigned long rcu_fwd_startat; 2763 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2764 unsigned long rcu_launder_gp_seq_start; 2765 int rcu_fwd_id; 2766 }; 2767 2768 static DEFINE_MUTEX(rcu_fwd_mutex); 2769 static struct rcu_fwd *rcu_fwds; 2770 static unsigned long rcu_fwd_seq; 2771 static atomic_long_t rcu_fwd_max_cbs; 2772 static bool rcu_fwd_emergency_stop; 2773 2774 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2775 { 2776 unsigned long gps; 2777 unsigned long gps_old; 2778 int i; 2779 int j; 2780 2781 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2782 if (rfp->n_launders_hist[i].n_launders > 0) 2783 break; 2784 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2785 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2786 gps_old = rfp->rcu_launder_gp_seq_start; 2787 for (j = 0; j <= i; j++) { 2788 gps = rfp->n_launders_hist[j].launder_gp_seq; 2789 pr_cont(" %ds/%d: %ld:%ld", 2790 j + 1, FWD_CBS_HIST_DIV, 2791 rfp->n_launders_hist[j].n_launders, 2792 rcutorture_seq_diff(gps, gps_old)); 2793 gps_old = gps; 2794 } 2795 pr_cont("\n"); 2796 } 2797 2798 /* Callback function for continuous-flood RCU callbacks. */ 2799 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2800 { 2801 unsigned long flags; 2802 int i; 2803 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2804 struct rcu_fwd_cb **rfcpp; 2805 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2806 2807 rfcp->rfc_next = NULL; 2808 rfcp->rfc_gps++; 2809 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2810 rfcpp = rfp->rcu_fwd_cb_tail; 2811 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2812 smp_store_release(rfcpp, rfcp); 2813 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2814 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2815 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2816 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2817 rfp->n_launders_hist[i].n_launders++; 2818 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2819 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2820 } 2821 2822 // Give the scheduler a chance, even on nohz_full CPUs. 2823 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2824 { 2825 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2826 // Real call_rcu() floods hit userspace, so emulate that. 2827 if (need_resched() || (iter & 0xfff)) 2828 schedule(); 2829 return; 2830 } 2831 // No userspace emulation: CB invocation throttles call_rcu() 2832 cond_resched(); 2833 } 2834 2835 /* 2836 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2837 * test is over or because we hit an OOM event. 
2838 */ 2839 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2840 { 2841 unsigned long flags; 2842 unsigned long freed = 0; 2843 struct rcu_fwd_cb *rfcp; 2844 2845 for (;;) { 2846 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2847 rfcp = rfp->rcu_fwd_cb_head; 2848 if (!rfcp) { 2849 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2850 break; 2851 } 2852 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2853 if (!rfp->rcu_fwd_cb_head) 2854 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2855 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2856 kfree(rfcp); 2857 freed++; 2858 rcu_torture_fwd_prog_cond_resched(freed); 2859 if (tick_nohz_full_enabled()) { 2860 local_irq_save(flags); 2861 rcu_momentary_eqs(); 2862 local_irq_restore(flags); 2863 } 2864 } 2865 return freed; 2866 } 2867 2868 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2869 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2870 int *tested, int *tested_tries) 2871 { 2872 unsigned long cver; 2873 unsigned long dur; 2874 struct fwd_cb_state fcs; 2875 unsigned long gps; 2876 int idx; 2877 int sd; 2878 int sd4; 2879 bool selfpropcb = false; 2880 unsigned long stopat; 2881 static DEFINE_TORTURE_RANDOM(trs); 2882 2883 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2884 if (!cur_ops->sync) 2885 return; // Cannot do need_resched() forward progress testing without ->sync. 2886 if (cur_ops->call && cur_ops->cb_barrier) { 2887 init_rcu_head_on_stack(&fcs.rh); 2888 selfpropcb = true; 2889 } 2890 2891 /* Tight loop containing cond_resched(). */ 2892 atomic_inc(&rcu_fwd_cb_nodelay); 2893 cur_ops->sync(); /* Later readers see above write. */ 2894 if (selfpropcb) { 2895 WRITE_ONCE(fcs.stop, 0); 2896 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2897 } 2898 cver = READ_ONCE(rcu_torture_current_version); 2899 gps = cur_ops->get_gp_seq(); 2900 sd = cur_ops->stall_dur() + 1; 2901 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2902 dur = sd4 + torture_random(&trs) % (sd - sd4); 2903 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2904 stopat = rfp->rcu_fwd_startat + dur; 2905 while (time_before(jiffies, stopat) && 2906 !shutdown_time_arrived() && 2907 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2908 idx = cur_ops->readlock(); 2909 udelay(10); 2910 cur_ops->readunlock(idx); 2911 if (!fwd_progress_need_resched || need_resched()) 2912 cond_resched(); 2913 } 2914 (*tested_tries)++; 2915 if (!time_before(jiffies, stopat) && 2916 !shutdown_time_arrived() && 2917 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2918 (*tested)++; 2919 cver = READ_ONCE(rcu_torture_current_version) - cver; 2920 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2921 WARN_ON(!cver && gps < 2); 2922 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2923 rfp->rcu_fwd_id, dur, cver, gps); 2924 } 2925 if (selfpropcb) { 2926 WRITE_ONCE(fcs.stop, 1); 2927 cur_ops->sync(); /* Wait for running CB to complete. */ 2928 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2929 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2930 } 2931 2932 if (selfpropcb) { 2933 WARN_ON(READ_ONCE(fcs.stop) != 2); 2934 destroy_rcu_head_on_stack(&fcs.rh); 2935 } 2936 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2937 atomic_dec(&rcu_fwd_cb_nodelay); 2938 } 2939 2940 /* Carry out call_rcu() forward-progress testing. 
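 * This floods call_rcu() with callbacks that land back on the rcu_fwd list
 * when invoked and are then re-posted; each trip through
 * rcu_torture_fwd_cb_cr() bumps rfc_gps, so that count reflects how many
 * grace periods have "laundered" a given callback.  The flood ends early
 * once MIN_FWD_CBS_LAUNDERED sufficiently-laundered callbacks (rfc_gps at
 * least MIN_FWD_CB_LAUNDERS) have been seen, and otherwise after
 * MAX_FWD_CB_JIFFIES.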
*/ 2941 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2942 { 2943 unsigned long cver; 2944 unsigned long flags; 2945 unsigned long gps; 2946 int i; 2947 long n_launders; 2948 long n_launders_cb_snap; 2949 long n_launders_sa; 2950 long n_max_cbs; 2951 long n_max_gps; 2952 struct rcu_fwd_cb *rfcp; 2953 struct rcu_fwd_cb *rfcpn; 2954 unsigned long stopat; 2955 unsigned long stoppedat; 2956 2957 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2958 if (READ_ONCE(rcu_fwd_emergency_stop)) 2959 return; /* Get out of the way quickly, no GP wait! */ 2960 if (!cur_ops->call) 2961 return; /* Can't do call_rcu() fwd prog without ->call. */ 2962 2963 /* Loop continuously posting RCU callbacks. */ 2964 atomic_inc(&rcu_fwd_cb_nodelay); 2965 cur_ops->sync(); /* Later readers see above write. */ 2966 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2967 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2968 n_launders = 0; 2969 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2970 n_launders_sa = 0; 2971 n_max_cbs = 0; 2972 n_max_gps = 0; 2973 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2974 rfp->n_launders_hist[i].n_launders = 0; 2975 cver = READ_ONCE(rcu_torture_current_version); 2976 gps = cur_ops->get_gp_seq(); 2977 rfp->rcu_launder_gp_seq_start = gps; 2978 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2979 while (time_before(jiffies, stopat) && 2980 !shutdown_time_arrived() && 2981 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2982 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2983 rfcpn = NULL; 2984 if (rfcp) 2985 rfcpn = READ_ONCE(rfcp->rfc_next); 2986 if (rfcpn) { 2987 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2988 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2989 break; 2990 rfp->rcu_fwd_cb_head = rfcpn; 2991 n_launders++; 2992 n_launders_sa++; 2993 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2994 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2995 if (WARN_ON_ONCE(!rfcp)) { 2996 schedule_timeout_interruptible(1); 2997 continue; 2998 } 2999 n_max_cbs++; 3000 n_launders_sa = 0; 3001 rfcp->rfc_gps = 0; 3002 rfcp->rfc_rfp = rfp; 3003 } else { 3004 rfcp = NULL; 3005 } 3006 if (rfcp) 3007 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 3008 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 3009 if (tick_nohz_full_enabled()) { 3010 local_irq_save(flags); 3011 rcu_momentary_eqs(); 3012 local_irq_restore(flags); 3013 } 3014 } 3015 stoppedat = jiffies; 3016 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 3017 cver = READ_ONCE(rcu_torture_current_version) - cver; 3018 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 3019 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 3020 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. 
*/ 3021 (void)rcu_torture_fwd_prog_cbfree(rfp); 3022 3023 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 3024 !shutdown_time_arrived()) { 3025 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) 3026 cur_ops->gp_kthread_dbg(); 3027 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n", 3028 __func__, 3029 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 3030 n_launders + n_max_cbs - n_launders_cb_snap, 3031 n_launders, n_launders_sa, 3032 n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); 3033 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 3034 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 3035 rcu_torture_fwd_cb_hist(rfp); 3036 mutex_unlock(&rcu_fwd_mutex); 3037 } 3038 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 3039 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 3040 atomic_dec(&rcu_fwd_cb_nodelay); 3041 } 3042 3043 3044 /* 3045 * OOM notifier, but this only prints diagnostic information for the 3046 * current forward-progress test. 3047 */ 3048 static int rcutorture_oom_notify(struct notifier_block *self, 3049 unsigned long notused, void *nfreed) 3050 { 3051 int i; 3052 long ncbs; 3053 struct rcu_fwd *rfp; 3054 3055 mutex_lock(&rcu_fwd_mutex); 3056 rfp = rcu_fwds; 3057 if (!rfp) { 3058 mutex_unlock(&rcu_fwd_mutex); 3059 return NOTIFY_OK; 3060 } 3061 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 3062 __func__); 3063 for (i = 0; i < fwd_progress; i++) { 3064 rcu_torture_fwd_cb_hist(&rfp[i]); 3065 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 3066 } 3067 WRITE_ONCE(rcu_fwd_emergency_stop, true); 3068 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 3069 ncbs = 0; 3070 for (i = 0; i < fwd_progress; i++) 3071 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3072 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3073 cur_ops->cb_barrier(); 3074 ncbs = 0; 3075 for (i = 0; i < fwd_progress; i++) 3076 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3077 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3078 cur_ops->cb_barrier(); 3079 ncbs = 0; 3080 for (i = 0; i < fwd_progress; i++) 3081 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3082 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3083 smp_mb(); /* Frees before return to avoid redoing OOM. */ 3084 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 3085 pr_info("%s returning after OOM processing.\n", __func__); 3086 mutex_unlock(&rcu_fwd_mutex); 3087 return NOTIFY_OK; 3088 } 3089 3090 static struct notifier_block rcutorture_oom_nb = { 3091 .notifier_call = rcutorture_oom_notify 3092 }; 3093 3094 /* Carry out grace-period forward-progress testing. 
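 * When fwd_progress > 1, kthread 0 acts as the leader: it clears the
 * emergency-stop flag, reports the previous pass's maximum callback count,
 * and advances rcu_fwd_seq, while the other kthreads wait for rcu_fwd_seq
 * to change before starting their own pass.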
*/ 3095 static int rcu_torture_fwd_prog(void *args) 3096 { 3097 bool firsttime = true; 3098 long max_cbs; 3099 int oldnice = task_nice(current); 3100 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 3101 struct rcu_fwd *rfp = args; 3102 int tested = 0; 3103 int tested_tries = 0; 3104 3105 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 3106 rcu_bind_current_to_nocb(); 3107 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 3108 set_user_nice(current, MAX_NICE); 3109 do { 3110 if (!rfp->rcu_fwd_id) { 3111 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 3112 WRITE_ONCE(rcu_fwd_emergency_stop, false); 3113 if (!firsttime) { 3114 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 3115 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 3116 } 3117 firsttime = false; 3118 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 3119 } else { 3120 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 3121 schedule_timeout_interruptible(HZ / 20); 3122 oldseq = READ_ONCE(rcu_fwd_seq); 3123 } 3124 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3125 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 3126 rcu_torture_fwd_prog_cr(rfp); 3127 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 3128 (!IS_ENABLED(CONFIG_TINY_RCU) || 3129 (rcu_inkernel_boot_has_ended() && 3130 torture_num_online_cpus() > rfp->rcu_fwd_id))) 3131 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 3132 3133 /* Avoid slow periods, better to test when busy. */ 3134 if (stutter_wait("rcu_torture_fwd_prog")) 3135 sched_set_normal(current, oldnice); 3136 } while (!torture_must_stop()); 3137 /* Short runs might not contain a valid forward-progress attempt. */ 3138 if (!rfp->rcu_fwd_id) { 3139 WARN_ON(!tested && tested_tries >= 5); 3140 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 3141 } 3142 torture_kthread_stopping("rcu_torture_fwd_prog"); 3143 return 0; 3144 } 3145 3146 /* If forward-progress checking is requested and feasible, spawn the thread. */ 3147 static int __init rcu_torture_fwd_prog_init(void) 3148 { 3149 int i; 3150 int ret = 0; 3151 struct rcu_fwd *rfp; 3152 3153 if (!fwd_progress) 3154 return 0; /* Not requested, so don't do it. */ 3155 if (fwd_progress >= nr_cpu_ids) { 3156 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 3157 fwd_progress = nr_cpu_ids; 3158 } else if (fwd_progress < 0) { 3159 fwd_progress = nr_cpu_ids; 3160 } 3161 if ((!cur_ops->sync && !cur_ops->call) || 3162 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 3163 cur_ops == &rcu_busted_ops) { 3164 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 3165 fwd_progress = 0; 3166 return 0; 3167 } 3168 if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3169 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing"); 3170 fwd_progress = 0; 3171 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 3172 return -EINVAL; /* In module, can fail back to user. */ 3173 WARN_ON(1); /* Make sure rcutorture scripting notices conflict. 
*/ 3174 return 0; 3175 } 3176 if (fwd_progress_holdoff <= 0) 3177 fwd_progress_holdoff = 1; 3178 if (fwd_progress_div <= 0) 3179 fwd_progress_div = 4; 3180 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 3181 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 3182 if (!rfp || !fwd_prog_tasks) { 3183 kfree(rfp); 3184 kfree(fwd_prog_tasks); 3185 fwd_prog_tasks = NULL; 3186 fwd_progress = 0; 3187 return -ENOMEM; 3188 } 3189 for (i = 0; i < fwd_progress; i++) { 3190 spin_lock_init(&rfp[i].rcu_fwd_lock); 3191 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3192 rfp[i].rcu_fwd_id = i; 3193 } 3194 mutex_lock(&rcu_fwd_mutex); 3195 rcu_fwds = rfp; 3196 mutex_unlock(&rcu_fwd_mutex); 3197 register_oom_notifier(&rcutorture_oom_nb); 3198 for (i = 0; i < fwd_progress; i++) { 3199 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3200 if (ret) { 3201 fwd_progress = i; 3202 return ret; 3203 } 3204 } 3205 return 0; 3206 } 3207 3208 static void rcu_torture_fwd_prog_cleanup(void) 3209 { 3210 int i; 3211 struct rcu_fwd *rfp; 3212 3213 if (!rcu_fwds || !fwd_prog_tasks) 3214 return; 3215 for (i = 0; i < fwd_progress; i++) 3216 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3217 unregister_oom_notifier(&rcutorture_oom_nb); 3218 mutex_lock(&rcu_fwd_mutex); 3219 rfp = rcu_fwds; 3220 rcu_fwds = NULL; 3221 mutex_unlock(&rcu_fwd_mutex); 3222 kfree(rfp); 3223 kfree(fwd_prog_tasks); 3224 fwd_prog_tasks = NULL; 3225 } 3226 3227 /* Callback function for RCU barrier testing. */ 3228 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3229 { 3230 atomic_inc(&barrier_cbs_invoked); 3231 } 3232 3233 /* IPI handler to get callback posted on desired CPU, if online. */ 3234 static int rcu_torture_barrier1cb(void *rcu_void) 3235 { 3236 struct rcu_head *rhp = rcu_void; 3237 3238 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3239 return 0; 3240 } 3241 3242 /* kthread function to register callbacks used to test RCU barriers. */ 3243 static int rcu_torture_barrier_cbs(void *arg) 3244 { 3245 long myid = (long)arg; 3246 bool lastphase = false; 3247 bool newphase; 3248 struct rcu_head rcu; 3249 3250 init_rcu_head_on_stack(&rcu); 3251 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3252 set_user_nice(current, MAX_NICE); 3253 do { 3254 wait_event(barrier_cbs_wq[myid], 3255 (newphase = 3256 smp_load_acquire(&barrier_phase)) != lastphase || 3257 torture_must_stop()); 3258 lastphase = newphase; 3259 if (torture_must_stop()) 3260 break; 3261 /* 3262 * The above smp_load_acquire() ensures barrier_phase load 3263 * is ordered before the following ->call(). 3264 */ 3265 if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1)) 3266 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3267 3268 if (atomic_dec_and_test(&barrier_cbs_count)) 3269 wake_up(&barrier_wq); 3270 } while (!torture_must_stop()); 3271 if (cur_ops->cb_barrier != NULL) 3272 cur_ops->cb_barrier(); 3273 destroy_rcu_head_on_stack(&rcu); 3274 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3275 return 0; 3276 } 3277 3278 /* kthread function to drive and coordinate RCU barrier testing. */ 3279 static int rcu_torture_barrier(void *arg) 3280 { 3281 int i; 3282 3283 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3284 do { 3285 atomic_set(&barrier_cbs_invoked, 0); 3286 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3287 /* Ensure barrier_phase ordered after prior assignments. 
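This smp_store_release() pairs with the smp_load_acquire() in rcu_torture_barrier_cbs(), so each callback kthread sees the newly zeroed counters before it observes the new phase.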
*/ 3288 smp_store_release(&barrier_phase, !barrier_phase); 3289 for (i = 0; i < n_barrier_cbs; i++) 3290 wake_up(&barrier_cbs_wq[i]); 3291 wait_event(barrier_wq, 3292 atomic_read(&barrier_cbs_count) == 0 || 3293 torture_must_stop()); 3294 if (torture_must_stop()) 3295 break; 3296 n_barrier_attempts++; 3297 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3298 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3299 n_rcu_torture_barrier_error++; 3300 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3301 atomic_read(&barrier_cbs_invoked), 3302 n_barrier_cbs); 3303 WARN_ON(1); 3304 // Wait manually for the remaining callbacks 3305 i = 0; 3306 do { 3307 if (WARN_ON(i++ > HZ)) 3308 i = INT_MIN; 3309 schedule_timeout_interruptible(1); 3310 cur_ops->cb_barrier(); 3311 } while (atomic_read(&barrier_cbs_invoked) != 3312 n_barrier_cbs && 3313 !torture_must_stop()); 3314 smp_mb(); // Can't trust ordering if broken. 3315 if (!torture_must_stop()) 3316 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3317 atomic_read(&barrier_cbs_invoked)); 3318 } else { 3319 n_barrier_successes++; 3320 } 3321 schedule_timeout_interruptible(HZ / 10); 3322 } while (!torture_must_stop()); 3323 torture_kthread_stopping("rcu_torture_barrier"); 3324 return 0; 3325 } 3326 3327 /* Initialize RCU barrier testing. */ 3328 static int rcu_torture_barrier_init(void) 3329 { 3330 int i; 3331 int ret; 3332 3333 if (n_barrier_cbs <= 0) 3334 return 0; 3335 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3336 pr_alert("%s" TORTURE_FLAG 3337 " Call or barrier ops missing for %s,\n", 3338 torture_type, cur_ops->name); 3339 pr_alert("%s" TORTURE_FLAG 3340 " RCU barrier testing omitted from run.\n", 3341 torture_type); 3342 return 0; 3343 } 3344 atomic_set(&barrier_cbs_count, 0); 3345 atomic_set(&barrier_cbs_invoked, 0); 3346 barrier_cbs_tasks = 3347 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3348 GFP_KERNEL); 3349 barrier_cbs_wq = 3350 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3351 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3352 return -ENOMEM; 3353 for (i = 0; i < n_barrier_cbs; i++) { 3354 init_waitqueue_head(&barrier_cbs_wq[i]); 3355 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3356 (void *)(long)i, 3357 barrier_cbs_tasks[i]); 3358 if (ret) 3359 return ret; 3360 } 3361 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3362 } 3363 3364 /* Clean up after RCU barrier testing. */ 3365 static void rcu_torture_barrier_cleanup(void) 3366 { 3367 int i; 3368 3369 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3370 if (barrier_cbs_tasks != NULL) { 3371 for (i = 0; i < n_barrier_cbs; i++) 3372 torture_stop_kthread(rcu_torture_barrier_cbs, 3373 barrier_cbs_tasks[i]); 3374 kfree(barrier_cbs_tasks); 3375 barrier_cbs_tasks = NULL; 3376 } 3377 if (barrier_cbs_wq != NULL) { 3378 kfree(barrier_cbs_wq); 3379 barrier_cbs_wq = NULL; 3380 } 3381 } 3382 3383 static bool rcu_torture_can_boost(void) 3384 { 3385 static int boost_warn_once; 3386 int prio; 3387 3388 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3389 return false; 3390 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3391 return false; 3392 3393 prio = rcu_get_gp_kthreads_prio(); 3394 if (!prio) 3395 return false; 3396 3397 if (prio < 2) { 3398 if (boost_warn_once == 1) 3399 return false; 3400 3401 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3402 boost_warn_once = 1; 3403 return false; 3404 } 3405 3406 return true; 3407 } 3408 3409 static bool read_exit_child_stop; 3410 static bool read_exit_child_stopped; 3411 static wait_queue_head_t read_exit_wq; 3412 3413 // Child kthread which just does an rcutorture reader and exits. 3414 static int rcu_torture_read_exit_child(void *trsp_in) 3415 { 3416 struct torture_random_state *trsp = trsp_in; 3417 3418 set_user_nice(current, MAX_NICE); 3419 // Minimize time between reading and exiting. 3420 while (!kthread_should_stop()) 3421 schedule_timeout_uninterruptible(HZ / 20); 3422 (void)rcu_torture_one_read(trsp, -1); 3423 return 0; 3424 } 3425 3426 // Parent kthread which creates and destroys read-exit child kthreads. 3427 static int rcu_torture_read_exit(void *unused) 3428 { 3429 bool errexit = false; 3430 int i; 3431 struct task_struct *tsp; 3432 DEFINE_TORTURE_RANDOM(trs); 3433 3434 // Allocate and initialize. 3435 set_user_nice(current, MAX_NICE); 3436 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3437 3438 // Each pass through this loop does one read-exit episode. 3439 do { 3440 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3441 for (i = 0; i < read_exit_burst; i++) { 3442 if (READ_ONCE(read_exit_child_stop)) 3443 break; 3444 stutter_wait("rcu_torture_read_exit"); 3445 // Spawn child. 3446 tsp = kthread_run(rcu_torture_read_exit_child, 3447 &trs, "%s", "rcu_torture_read_exit_child"); 3448 if (IS_ERR(tsp)) { 3449 TOROUT_ERRSTRING("out of memory"); 3450 errexit = true; 3451 break; 3452 } 3453 cond_resched(); 3454 kthread_stop(tsp); 3455 n_read_exits++; 3456 } 3457 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3458 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3459 i = 0; 3460 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3461 schedule_timeout_uninterruptible(HZ); 3462 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3463 3464 // Clean up and exit. 3465 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3466 smp_mb(); // Store before wakeup. 3467 wake_up(&read_exit_wq); 3468 while (!torture_must_stop()) 3469 schedule_timeout_uninterruptible(HZ / 20); 3470 torture_kthread_stopping("rcu_torture_read_exit"); 3471 return 0; 3472 } 3473 3474 static int rcu_torture_read_exit_init(void) 3475 { 3476 if (read_exit_burst <= 0) 3477 return 0; 3478 init_waitqueue_head(&read_exit_wq); 3479 read_exit_child_stop = false; 3480 read_exit_child_stopped = false; 3481 return torture_create_kthread(rcu_torture_read_exit, NULL, 3482 read_exit_task); 3483 } 3484 3485 static void rcu_torture_read_exit_cleanup(void) 3486 { 3487 if (!read_exit_task) 3488 return; 3489 WRITE_ONCE(read_exit_child_stop, true); 3490 smp_mb(); // Above write before wait. 
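// Wait for the parent read-exit kthread to finish reaping its children before it is stopped.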
3491 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 3492 torture_stop_kthread(rcutorture_read_exit, read_exit_task); 3493 } 3494 3495 static void rcutorture_test_nmis(int n) 3496 { 3497 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3498 int cpu; 3499 int dumpcpu; 3500 int i; 3501 3502 for (i = 0; i < n; i++) { 3503 preempt_disable(); 3504 cpu = smp_processor_id(); 3505 dumpcpu = cpu + 1; 3506 if (dumpcpu >= nr_cpu_ids) 3507 dumpcpu = 0; 3508 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu); 3509 dump_cpu_task(dumpcpu); 3510 preempt_enable(); 3511 schedule_timeout_uninterruptible(15 * HZ); 3512 } 3513 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3514 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis); 3515 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3516 } 3517 3518 // Randomly preempt online CPUs. 3519 static int rcu_torture_preempt(void *unused) 3520 { 3521 int cpu = -1; 3522 DEFINE_TORTURE_RANDOM(rand); 3523 3524 schedule_timeout_idle(stall_cpu_holdoff); 3525 do { 3526 // Wait for preempt_interval ms with up to 100us fuzz. 3527 torture_hrtimeout_ms(preempt_interval, 100, &rand); 3528 // Select online CPU. 3529 cpu = cpumask_next(cpu, cpu_online_mask); 3530 if (cpu >= nr_cpu_ids) 3531 cpu = cpumask_next(-1, cpu_online_mask); 3532 WARN_ON_ONCE(cpu >= nr_cpu_ids); 3533 // Move to that CPU, if can't do so, retry later. 3534 if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false)) 3535 continue; 3536 // Preempt at high-ish priority, then reset to normal. 3537 sched_set_fifo(current); 3538 torture_sched_setaffinity(current->pid, cpu_present_mask, true); 3539 mdelay(preempt_duration); 3540 sched_set_normal(current, 0); 3541 stutter_wait("rcu_torture_preempt"); 3542 } while (!torture_must_stop()); 3543 torture_kthread_stopping("rcu_torture_preempt"); 3544 return 0; 3545 } 3546 3547 static enum cpuhp_state rcutor_hp; 3548 3549 static void 3550 rcu_torture_cleanup(void) 3551 { 3552 int firsttime; 3553 int flags = 0; 3554 unsigned long gp_seq = 0; 3555 int i; 3556 3557 if (torture_cleanup_begin()) { 3558 if (cur_ops->cb_barrier != NULL) { 3559 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3560 cur_ops->cb_barrier(); 3561 } 3562 if (cur_ops->gp_slow_unregister) 3563 cur_ops->gp_slow_unregister(NULL); 3564 return; 3565 } 3566 if (!cur_ops) { 3567 torture_cleanup_end(); 3568 return; 3569 } 3570 3571 rcutorture_test_nmis(test_nmis); 3572 3573 if (cur_ops->gp_kthread_dbg) 3574 cur_ops->gp_kthread_dbg(); 3575 torture_stop_kthread(rcu_torture_preempt, preempt_task); 3576 rcu_torture_read_exit_cleanup(); 3577 rcu_torture_barrier_cleanup(); 3578 rcu_torture_fwd_prog_cleanup(); 3579 torture_stop_kthread(rcu_torture_stall, stall_task); 3580 torture_stop_kthread(rcu_torture_writer, writer_task); 3581 3582 if (nocb_tasks) { 3583 for (i = 0; i < nrealnocbers; i++) 3584 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 3585 kfree(nocb_tasks); 3586 nocb_tasks = NULL; 3587 } 3588 3589 if (reader_tasks) { 3590 for (i = 0; i < nrealreaders; i++) 3591 torture_stop_kthread(rcu_torture_reader, 3592 reader_tasks[i]); 3593 kfree(reader_tasks); 3594 reader_tasks = NULL; 3595 } 3596 kfree(rcu_torture_reader_mbchk); 3597 rcu_torture_reader_mbchk = NULL; 3598 3599 if (fakewriter_tasks) { 3600 for (i = 0; i < nfakewriters; i++) 3601 torture_stop_kthread(rcu_torture_fakewriter, 3602 fakewriter_tasks[i]); 3603 kfree(fakewriter_tasks); 3604 fakewriter_tasks = NULL; 3605 } 3606 
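/* Capture end-of-test grace-period state for the summary printed below. */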
3607 if (cur_ops->get_gp_data) 3608 cur_ops->get_gp_data(&flags, &gp_seq); 3609 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 3610 cur_ops->name, (long)gp_seq, flags, 3611 rcutorture_seq_diff(gp_seq, start_gp_seq)); 3612 torture_stop_kthread(rcu_torture_stats, stats_task); 3613 torture_stop_kthread(rcu_torture_fqs, fqs_task); 3614 if (rcu_torture_can_boost() && rcutor_hp >= 0) 3615 cpuhp_remove_state(rcutor_hp); 3616 3617 /* 3618 * Wait for all RCU callbacks to fire, then do torture-type-specific 3619 * cleanup operations. 3620 */ 3621 if (cur_ops->cb_barrier != NULL) { 3622 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3623 cur_ops->cb_barrier(); 3624 } 3625 if (cur_ops->cleanup != NULL) 3626 cur_ops->cleanup(); 3627 3628 rcu_torture_mem_dump_obj(); 3629 3630 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 3631 3632 if (err_segs_recorded) { 3633 pr_alert("Failure/close-call rcutorture reader segments:\n"); 3634 if (rt_read_nsegs == 0) 3635 pr_alert("\t: No segments recorded!!!\n"); 3636 firsttime = 1; 3637 for (i = 0; i < rt_read_nsegs; i++) { 3638 pr_alert("\t%d: %#4x", i, err_segs[i].rt_readstate); 3639 if (err_segs[i].rt_delay_jiffies != 0) { 3640 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 3641 err_segs[i].rt_delay_jiffies); 3642 firsttime = 0; 3643 } 3644 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 3645 pr_cont(" CPU %2d", err_segs[i].rt_cpu); 3646 if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu) 3647 pr_cont("->%-2d", err_segs[i].rt_end_cpu); 3648 else 3649 pr_cont(" ..."); 3650 } 3651 if (err_segs[i].rt_delay_ms != 0) { 3652 pr_cont(" %s%ldms", firsttime ? "" : "+", 3653 err_segs[i].rt_delay_ms); 3654 firsttime = 0; 3655 } 3656 if (err_segs[i].rt_delay_us != 0) { 3657 pr_cont(" %s%ldus", firsttime ? "" : "+", 3658 err_segs[i].rt_delay_us); 3659 firsttime = 0; 3660 } 3661 pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : ""); 3662 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH) 3663 pr_cont(" BH"); 3664 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ) 3665 pr_cont(" IRQ"); 3666 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT) 3667 pr_cont(" PREEMPT"); 3668 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH) 3669 pr_cont(" RBH"); 3670 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED) 3671 pr_cont(" SCHED"); 3672 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1) 3673 pr_cont(" RCU_1"); 3674 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2) 3675 pr_cont(" RCU_2"); 3676 pr_cont("\n"); 3677 3678 } 3679 if (rt_read_preempted) 3680 pr_alert("\tReader was preempted.\n"); 3681 } 3682 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 3683 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 3684 else if (torture_onoff_failures()) 3685 rcu_torture_print_module_parms(cur_ops, 3686 "End of test: RCU_HOTPLUG"); 3687 else 3688 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 3689 torture_cleanup_end(); 3690 if (cur_ops->gp_slow_unregister) 3691 cur_ops->gp_slow_unregister(NULL); 3692 } 3693 3694 static void rcu_torture_leak_cb(struct rcu_head *rhp) 3695 { 3696 } 3697 3698 static void rcu_torture_err_cb(struct rcu_head *rhp) 3699 { 3700 /* 3701 * This -might- happen due to race conditions, but is unlikely. 
3702 * The scenario that leads to this happening is that the 3703 * first of the pair of duplicate callbacks is queued, 3704 * someone else starts a grace period that includes that 3705 * callback, then the second of the pair must wait for the 3706 * next grace period. Unlikely, but can happen. If it 3707 * does happen, the debug-objects subsystem won't have splatted. 3708 */ 3709 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 3710 } 3711 3712 /* 3713 * Verify that double-free causes debug-objects to complain, but only 3714 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 3715 * cannot be carried out. 3716 */ 3717 static void rcu_test_debug_objects(void) 3718 { 3719 struct rcu_head rh1; 3720 struct rcu_head rh2; 3721 int idx; 3722 3723 if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) { 3724 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n", 3725 KBUILD_MODNAME, cur_ops->name); 3726 return; 3727 } 3728 3729 if (WARN_ON_ONCE(cur_ops->debug_objects && 3730 (!cur_ops->call || !cur_ops->cb_barrier))) 3731 return; 3732 3733 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 3734 3735 init_rcu_head_on_stack(&rh1); 3736 init_rcu_head_on_stack(&rh2); 3737 pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name); 3738 3739 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 3740 idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */ 3741 cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 3742 cur_ops->call(&rh2, rcu_torture_leak_cb); 3743 cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 3744 if (rhp) { 3745 cur_ops->call(rhp, rcu_torture_leak_cb); 3746 cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 3747 } 3748 cur_ops->readunlock(idx); 3749 3750 /* Wait for them all to get done so we can safely return. */ 3751 cur_ops->cb_barrier(); 3752 pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name); 3753 destroy_rcu_head_on_stack(&rh1); 3754 destroy_rcu_head_on_stack(&rh2); 3755 kfree(rhp); 3756 } 3757 3758 static void rcutorture_sync(void) 3759 { 3760 static unsigned long n; 3761 3762 if (cur_ops->sync && !(++n & 0xfff)) 3763 cur_ops->sync(); 3764 } 3765 3766 static DEFINE_MUTEX(mut0); 3767 static DEFINE_MUTEX(mut1); 3768 static DEFINE_MUTEX(mut2); 3769 static DEFINE_MUTEX(mut3); 3770 static DEFINE_MUTEX(mut4); 3771 static DEFINE_MUTEX(mut5); 3772 static DEFINE_MUTEX(mut6); 3773 static DEFINE_MUTEX(mut7); 3774 static DEFINE_MUTEX(mut8); 3775 static DEFINE_MUTEX(mut9); 3776 3777 static DECLARE_RWSEM(rwsem0); 3778 static DECLARE_RWSEM(rwsem1); 3779 static DECLARE_RWSEM(rwsem2); 3780 static DECLARE_RWSEM(rwsem3); 3781 static DECLARE_RWSEM(rwsem4); 3782 static DECLARE_RWSEM(rwsem5); 3783 static DECLARE_RWSEM(rwsem6); 3784 static DECLARE_RWSEM(rwsem7); 3785 static DECLARE_RWSEM(rwsem8); 3786 static DECLARE_RWSEM(rwsem9); 3787 3788 DEFINE_STATIC_SRCU(srcu0); 3789 DEFINE_STATIC_SRCU(srcu1); 3790 DEFINE_STATIC_SRCU(srcu2); 3791 DEFINE_STATIC_SRCU(srcu3); 3792 DEFINE_STATIC_SRCU(srcu4); 3793 DEFINE_STATIC_SRCU(srcu5); 3794 DEFINE_STATIC_SRCU(srcu6); 3795 DEFINE_STATIC_SRCU(srcu7); 3796 DEFINE_STATIC_SRCU(srcu8); 3797 DEFINE_STATIC_SRCU(srcu9); 3798 3799 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, 3800 int cyclelen, int deadlock) 3801 { 3802 int j = i + 1; 3803 3804 if (j >= cyclelen) 3805 j = deadlock ? 
0 : -1; 3806 if (j >= 0) 3807 pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i); 3808 else 3809 pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i); 3810 return j; 3811 } 3812 3813 // Test lockdep on SRCU-based deadlock scenarios. 3814 static void rcu_torture_init_srcu_lockdep(void) 3815 { 3816 int cyclelen; 3817 int deadlock; 3818 bool err = false; 3819 int i; 3820 int j; 3821 int idx; 3822 struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, 3823 &mut5, &mut6, &mut7, &mut8, &mut9 }; 3824 struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, 3825 &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; 3826 struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, 3827 &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; 3828 int testtype; 3829 3830 if (!test_srcu_lockdep) 3831 return; 3832 3833 deadlock = test_srcu_lockdep / 1000; 3834 testtype = (test_srcu_lockdep / 10) % 100; 3835 cyclelen = test_srcu_lockdep % 10; 3836 WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); 3837 if (WARN_ONCE(deadlock != !!deadlock, 3838 "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n", 3839 __func__, test_srcu_lockdep, deadlock)) 3840 err = true; 3841 if (WARN_ONCE(cyclelen <= 0, 3842 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", 3843 __func__, test_srcu_lockdep, cyclelen)) 3844 err = true; 3845 if (err) 3846 goto err_out; 3847 3848 if (testtype == 0) { 3849 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", 3850 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3851 if (deadlock && cyclelen == 1) 3852 pr_info("%s: Expect hang.\n", __func__); 3853 for (i = 0; i < cyclelen; i++) { 3854 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 3855 "srcu_read_unlock", i, cyclelen, deadlock); 3856 idx = srcu_read_lock(srcus[i]); 3857 if (j >= 0) 3858 synchronize_srcu(srcus[j]); 3859 srcu_read_unlock(srcus[i], idx); 3860 } 3861 return; 3862 } 3863 3864 if (testtype == 1) { 3865 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 3866 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3867 for (i = 0; i < cyclelen; i++) { 3868 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 3869 __func__, i, i, i, i); 3870 idx = srcu_read_lock(srcus[i]); 3871 mutex_lock(muts[i]); 3872 mutex_unlock(muts[i]); 3873 srcu_read_unlock(srcus[i], idx); 3874 3875 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 3876 "mutex_unlock", i, cyclelen, deadlock); 3877 mutex_lock(muts[i]); 3878 if (j >= 0) 3879 synchronize_srcu(srcus[j]); 3880 mutex_unlock(muts[i]); 3881 } 3882 return; 3883 } 3884 3885 if (testtype == 2) { 3886 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 3887 __func__, test_srcu_lockdep, cyclelen, deadlock ? 
"" : "non-"); 3888 for (i = 0; i < cyclelen; i++) { 3889 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 3890 __func__, i, i, i, i); 3891 idx = srcu_read_lock(srcus[i]); 3892 down_read(rwsems[i]); 3893 up_read(rwsems[i]); 3894 srcu_read_unlock(srcus[i], idx); 3895 3896 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 3897 "up_write", i, cyclelen, deadlock); 3898 down_write(rwsems[i]); 3899 if (j >= 0) 3900 synchronize_srcu(srcus[j]); 3901 up_write(rwsems[i]); 3902 } 3903 return; 3904 } 3905 3906 #ifdef CONFIG_TASKS_TRACE_RCU 3907 if (testtype == 3) { 3908 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 3909 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 3910 if (deadlock && cyclelen == 1) 3911 pr_info("%s: Expect hang.\n", __func__); 3912 for (i = 0; i < cyclelen; i++) { 3913 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 3914 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 3915 : "synchronize_srcu"; 3916 char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock"; 3917 3918 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 3919 if (i == 0) 3920 rcu_read_lock_trace(); 3921 else 3922 idx = srcu_read_lock(srcus[i]); 3923 if (j >= 0) { 3924 if (i == cyclelen - 1) 3925 synchronize_rcu_tasks_trace(); 3926 else 3927 synchronize_srcu(srcus[j]); 3928 } 3929 if (i == 0) 3930 rcu_read_unlock_trace(); 3931 else 3932 srcu_read_unlock(srcus[i], idx); 3933 } 3934 return; 3935 } 3936 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 3937 3938 err_out: 3939 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 3940 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 3941 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 3942 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 3943 pr_info("%s: L: Cycle length.\n", __func__); 3944 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 3945 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 3946 } 3947 3948 static int __init 3949 rcu_torture_init(void) 3950 { 3951 long i; 3952 int cpu; 3953 int firsterr = 0; 3954 int flags = 0; 3955 unsigned long gp_seq = 0; 3956 static struct rcu_torture_ops *torture_ops[] = { 3957 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 3958 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 3959 &trivial_ops, 3960 }; 3961 3962 if (!torture_init_begin(torture_type, verbose)) 3963 return -EBUSY; 3964 3965 /* Process args and tell the world that the torturer is on the job. 
*/ 3966 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 3967 cur_ops = torture_ops[i]; 3968 if (strcmp(torture_type, cur_ops->name) == 0) 3969 break; 3970 } 3971 if (i == ARRAY_SIZE(torture_ops)) { 3972 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 3973 torture_type); 3974 pr_alert("rcu-torture types:"); 3975 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 3976 pr_cont(" %s", torture_ops[i]->name); 3977 pr_cont("\n"); 3978 firsterr = -EINVAL; 3979 cur_ops = NULL; 3980 goto unwind; 3981 } 3982 if (cur_ops->fqs == NULL && fqs_duration != 0) { 3983 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 3984 fqs_duration = 0; 3985 } 3986 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 3987 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3988 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 3989 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 3990 nocbs_nthreads = 0; 3991 } 3992 if (cur_ops->init) 3993 cur_ops->init(); 3994 3995 rcu_torture_init_srcu_lockdep(); 3996 3997 if (nreaders >= 0) { 3998 nrealreaders = nreaders; 3999 } else { 4000 nrealreaders = num_online_cpus() - 2 - nreaders; 4001 if (nrealreaders <= 0) 4002 nrealreaders = 1; 4003 } 4004 rcu_torture_print_module_parms(cur_ops, "Start of test"); 4005 if (cur_ops->get_gp_data) 4006 cur_ops->get_gp_data(&flags, &gp_seq); 4007 start_gp_seq = gp_seq; 4008 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 4009 cur_ops->name, (long)gp_seq, flags); 4010 4011 /* Set up the freelist. */ 4012 4013 INIT_LIST_HEAD(&rcu_torture_freelist); 4014 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 4015 rcu_tortures[i].rtort_mbtest = 0; 4016 list_add_tail(&rcu_tortures[i].rtort_free, 4017 &rcu_torture_freelist); 4018 } 4019 4020 /* Initialize the statistics so that each run gets its own numbers. */ 4021 4022 rcu_torture_current = NULL; 4023 rcu_torture_current_version = 0; 4024 atomic_set(&n_rcu_torture_alloc, 0); 4025 atomic_set(&n_rcu_torture_alloc_fail, 0); 4026 atomic_set(&n_rcu_torture_free, 0); 4027 atomic_set(&n_rcu_torture_mberror, 0); 4028 atomic_set(&n_rcu_torture_mbchk_fail, 0); 4029 atomic_set(&n_rcu_torture_mbchk_tries, 0); 4030 atomic_set(&n_rcu_torture_error, 0); 4031 n_rcu_torture_barrier_error = 0; 4032 n_rcu_torture_boost_ktrerror = 0; 4033 n_rcu_torture_boost_failure = 0; 4034 n_rcu_torture_boosts = 0; 4035 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 4036 atomic_set(&rcu_torture_wcount[i], 0); 4037 for_each_possible_cpu(cpu) { 4038 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 4039 per_cpu(rcu_torture_count, cpu)[i] = 0; 4040 per_cpu(rcu_torture_batch, cpu)[i] = 0; 4041 } 4042 } 4043 err_segs_recorded = 0; 4044 rt_read_nsegs = 0; 4045 4046 /* Start up the kthreads. 
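The writer kthread comes first, followed by any fake writers, the readers, and the remaining auxiliary tasks; a failure at any point unwinds through rcu_torture_cleanup().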
*/ 4047 4048 rcu_torture_write_types(); 4049 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 4050 writer_task); 4051 if (torture_init_error(firsterr)) 4052 goto unwind; 4053 if (nfakewriters > 0) { 4054 fakewriter_tasks = kcalloc(nfakewriters, 4055 sizeof(fakewriter_tasks[0]), 4056 GFP_KERNEL); 4057 if (fakewriter_tasks == NULL) { 4058 TOROUT_ERRSTRING("out of memory"); 4059 firsterr = -ENOMEM; 4060 goto unwind; 4061 } 4062 } 4063 for (i = 0; i < nfakewriters; i++) { 4064 firsterr = torture_create_kthread(rcu_torture_fakewriter, 4065 NULL, fakewriter_tasks[i]); 4066 if (torture_init_error(firsterr)) 4067 goto unwind; 4068 } 4069 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 4070 GFP_KERNEL); 4071 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 4072 GFP_KERNEL); 4073 if (!reader_tasks || !rcu_torture_reader_mbchk) { 4074 TOROUT_ERRSTRING("out of memory"); 4075 firsterr = -ENOMEM; 4076 goto unwind; 4077 } 4078 for (i = 0; i < nrealreaders; i++) { 4079 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 4080 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 4081 reader_tasks[i]); 4082 if (torture_init_error(firsterr)) 4083 goto unwind; 4084 } 4085 nrealnocbers = nocbs_nthreads; 4086 if (WARN_ON(nrealnocbers < 0)) 4087 nrealnocbers = 1; 4088 if (WARN_ON(nocbs_toggle < 0)) 4089 nocbs_toggle = HZ; 4090 if (nrealnocbers > 0) { 4091 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 4092 if (nocb_tasks == NULL) { 4093 TOROUT_ERRSTRING("out of memory"); 4094 firsterr = -ENOMEM; 4095 goto unwind; 4096 } 4097 } else { 4098 nocb_tasks = NULL; 4099 } 4100 for (i = 0; i < nrealnocbers; i++) { 4101 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 4102 if (torture_init_error(firsterr)) 4103 goto unwind; 4104 } 4105 if (stat_interval > 0) { 4106 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 4107 stats_task); 4108 if (torture_init_error(firsterr)) 4109 goto unwind; 4110 } 4111 if (test_no_idle_hz && shuffle_interval > 0) { 4112 firsterr = torture_shuffle_init(shuffle_interval * HZ); 4113 if (torture_init_error(firsterr)) 4114 goto unwind; 4115 } 4116 if (stutter < 0) 4117 stutter = 0; 4118 if (stutter) { 4119 int t; 4120 4121 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 4122 firsterr = torture_stutter_init(stutter * HZ, t); 4123 if (torture_init_error(firsterr)) 4124 goto unwind; 4125 } 4126 if (fqs_duration < 0) 4127 fqs_duration = 0; 4128 if (fqs_holdoff < 0) 4129 fqs_holdoff = 0; 4130 if (fqs_duration && fqs_holdoff) { 4131 /* Create the fqs thread */ 4132 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 4133 fqs_task); 4134 if (torture_init_error(firsterr)) 4135 goto unwind; 4136 } 4137 if (test_boost_interval < 1) 4138 test_boost_interval = 1; 4139 if (test_boost_duration < 2) 4140 test_boost_duration = 2; 4141 if (rcu_torture_can_boost()) { 4142 4143 boost_starttime = jiffies + test_boost_interval * HZ; 4144 4145 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 4146 rcutorture_booster_init, 4147 rcutorture_booster_cleanup); 4148 rcutor_hp = firsterr; 4149 if (torture_init_error(firsterr)) 4150 goto unwind; 4151 } 4152 shutdown_jiffies = jiffies + shutdown_secs * HZ; 4153 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 4154 if (torture_init_error(firsterr)) 4155 goto unwind; 4156 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 4157 rcutorture_sync); 4158 if (torture_init_error(firsterr)) 4159 goto unwind; 4160 firsterr = rcu_torture_stall_init(); 4161 if (torture_init_error(firsterr)) 4162 goto unwind; 4163 firsterr = rcu_torture_fwd_prog_init(); 4164 if (torture_init_error(firsterr)) 4165 goto unwind; 4166 firsterr = rcu_torture_barrier_init(); 4167 if (torture_init_error(firsterr)) 4168 goto unwind; 4169 firsterr = rcu_torture_read_exit_init(); 4170 if (torture_init_error(firsterr)) 4171 goto unwind; 4172 if (preempt_duration > 0) { 4173 firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task); 4174 if (torture_init_error(firsterr)) 4175 goto unwind; 4176 } 4177 if (object_debug) 4178 rcu_test_debug_objects(); 4179 torture_init_end(); 4180 if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) 4181 cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); 4182 return 0; 4183 4184 unwind: 4185 torture_init_end(); 4186 rcu_torture_cleanup(); 4187 if (shutdown_secs) { 4188 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 4189 kernel_power_off(); 4190 } 4191 return firsterr; 4192 } 4193 4194 module_init(rcu_torture_init); 4195 module_exit(rcu_torture_cleanup); 4196
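/*
 * Example usage, illustrative only (the module and parameter names below
 * are those defined in this file; the exact combination is merely one
 * plausible choice):
 *
 *	modprobe rcutorture torture_type=srcu fwd_progress=2 \
 *		read_exit_burst=16 n_barrier_cbs=4
 *
 * For the SRCU-lockdep tests, test_srcu_lockdep is decoded as DNNL, so
 * for instance test_srcu_lockdep=1013 requests a three-way SRCU/mutex
 * deadlock cycle (D=1, NN=01, L=3), while 0013 requests the corresponding
 * non-deadlock sequence.
 */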