// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

// Bits for ->extendables field, extendables param, and related definitions.
#define RCUTORTURE_RDR_SHIFT_1	8	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_1	(0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	16	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_2	(0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	// Extend readers by disabling bh.
#define RCUTORTURE_RDR_IRQ	0x02	//  ... disabling interrupts.
#define RCUTORTURE_RDR_PREEMPT	0x04	//  ... disabling preemption.
#define RCUTORTURE_RDR_RBH	0x08	//  ... rcu_read_lock_bh().
#define RCUTORTURE_RDR_SCHED	0x10	//  ... rcu_read_lock_sched().
#define RCUTORTURE_RDR_RCU_1	0x20	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_RCU_2	0x40	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_UPDOWN	0x80	//  ... up-read from task, down-read from timer.
					//	Note: Manual start, automatic end.
#define RCUTORTURE_RDR_NBITS	8	// Number of bits defined above.
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) // Intentionally omit RCUTORTURE_RDR_UPDOWN.
#define RCUTORTURE_RDR_ALLBITS	\
	(RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
	 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
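
/*
 * Layout note (from the definitions above): the low-order byte holds the
 * RCUTORTURE_RDR_* protection flags, while the bytes selected by
 * RCUTORTURE_RDR_MASK_1 and RCUTORTURE_RDR_MASK_2 hold the SRCU indexes
 * from up to two nested SRCU readers, so that the matching index can be
 * handed back when each reader segment ends.  For example,
 * rcu_read_lock_bh() nested inside an SRCU reader whose index is 1 would
 * be encoded as
 * (RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_RCU_1 | (1 << RCUTORTURE_RDR_SHIFT_1)).
 */
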
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
torture_param(int, gp_cond_wi_exp, 128,
	      "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal polled grace periods, us (default 16 jiffies)");
torture_param(int, gp_poll_wi_exp, 128,
	      "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
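
/*
 * The parameters above are set on the modprobe command line or, for
 * built-in rcutorture, on the kernel boot line.  A hypothetical example
 * invocation (illustrative values only):
 *
 *	modprobe rcutorture torture_type=srcu stat_interval=15 test_boost=2
 */
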
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static int nrealfakewriters;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *updown_task;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
static struct task_struct *preempt_task;

#define RCU_TORTURE_PIPE_LEN 10
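
/*
 * Each rcu_torture element below cycles through a pipeline of
 * RCU_TORTURE_PIPE_LEN grace periods before being freed.  The per-CPU
 * rcu_torture_count[] and rcu_torture_batch[] arrays histogram how far
 * through the pipeline each element was when readers encountered it;
 * an element still visible to readers after too many grace periods
 * indicates a too-short grace period, which is exactly the class of
 * failure rcutorture exists to detect.
 */
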
// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
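
/*
 * Note: the rcu_torture_writer_state_names[] array above must be kept in
 * the same order as the RTWS_* definitions, because
 * rcu_torture_writer_state_getname() below indexes it directly by the
 * current writer state when producing diagnostic output.
 */
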
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
	int rt_cpu;
	int rt_end_cpu;
	unsigned long long rt_gp_seq;
	unsigned long long rt_gp_seq_end;
	u64 rt_ts;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static int rt_read_preempted;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);	// lockdep.
	int (*readlock_nesting)(void);	// actual nesting, if available, -1 if not.
	int (*down_read)(void);
	void (*up_read)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	void (*exp_current)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	int poll_active;
	int poll_active_full;
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	bool (*reader_blocked)(void);
	unsigned long long (*gather_gp_seqs)(void);
	void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
	void (*set_gpwrap_lag)(unsigned long lag);
	int (*get_gpwrap_count)(int cpu);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	int start_poll_irqsoff;
	int have_up_down;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
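
/*
 * Each torture flavor below fills in only the hooks it supports, so
 * users of cur_ops check a hook for NULL (or a capability field for
 * zero) before invoking it.  This is what lets a single test module
 * select among RCU, SRCU, the RCU-tasks variants, and the deliberately
 * broken implementations at load time via the torture_type parameter.
 */
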
/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}
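
/*
 * Probability note for rcu_read_delay() below: each modulus scales with
 * nrealreaders, so the expected system-wide rate of long (300 ms) and
 * short (200 us) reader delays stays roughly constant regardless of how
 * many reader kthreads are running, rather than growing with them.
 */
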
static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the
	 * grace period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if ((preempt_count() & HARDIRQ_MASK) || softirq_count())
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule(); /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

static int rcu_torture_readlock_nesting(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
		return rcu_preempt_depth();
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return (preempt_count() & PREEMPT_MASK);
	return -1;
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
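
/*
 * The callback below is the asynchronous half of the pipeline: each
 * invocation advances its element one stage via
 * rcu_torture_pipe_update_one(), and if the element has not yet
 * traversed RCU_TORTURE_PIPE_LEN grace periods, it is re-queued through
 * cur_ops->deferred_free(), so a single element rides through a full
 * series of grace periods before finally being freed.
 */
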
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.readlock_nesting = rcu_torture_readlock_nesting,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
	.poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.get_gp_data = rcutorture_get_gp_data,
	.gp_slow_register = rcu_gp_slow_register,
	.gp_slow_unregister = rcu_gp_slow_unregister,
	.reader_blocked = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
			  ? has_rcu_reader_blocked
			  : NULL,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.set_gpwrap_lag = rcu_set_gpwrap_lag,
	.get_gpwrap_count = rcu_get_gpwrap_count,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.debug_objects = 1,
	.start_poll_irqsoff = 1,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
DEFINE_STATIC_SRCU_FAST(srcu_ctlf);
DEFINE_STATIC_SRCU_FAST_UPDOWN(srcu_ctlfud);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL))
		VERBOSE_TOROUT_STRING("srcu_torture_init normal SRCU");
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		VERBOSE_TOROUT_STRING("srcu_torture_init NMI-safe SRCU");
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		srcu_ctlp = &srcu_ctlf;
		VERBOSE_TOROUT_STRING("srcu_torture_init fast SRCU");
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		srcu_ctlp = &srcu_ctlfud;
		VERBOSE_TOROUT_STRING("srcu_torture_init fast-up/down SRCU");
	}
}

static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

static int srcu_torture_read_lock(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;
	int ret = 0;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_read_lock(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		idx = srcu_read_lock_nmisafe(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 1;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		scp = srcu_read_lock_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 2;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		scp = srcu_read_lock_fast_updown(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 3;
	}
	return ret;
}
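
/*
 * Return-value encoding used by srcu_torture_read_lock() above and
 * decoded by srcu_torture_read_unlock() below: the SRCU index for each
 * requested reader flavor occupies its own bit position -- bit 0 for
 * normal, bit 1 for NMI-safe, bit 2 for fast, and bit 3 for
 * fast-up/down -- so that several flavors can be held simultaneously
 * and released individually.
 */
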
static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN)
		srcu_read_unlock_fast_updown(srcu_ctlp,
					     __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST)
		srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x4) >> 2));
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_read_unlock(srcu_ctlp, idx & 0x1);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static bool srcu_torture_have_up_down(void)
{
	int rf = reader_flavor;

	if (!rf)
		rf = SRCU_READ_FLAVOR_NORMAL;
	return !!(cur_ops->have_up_down & rf);
}

static int srcu_torture_down_read(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);
	WARN_ON_ONCE(reader_flavor & (reader_flavor - 1));

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_down_read(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		scp = srcu_down_read_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx << 3;
	}
	WARN_ON_ONCE(1);
	return 0;
}

static void srcu_torture_up_read(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN)
		srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	else if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) ||
		 !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_up_read(srcu_ctlp, idx & 0x1);
	else
		WARN_ON_ONCE(1);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static void srcu_torture_expedite_current(void)
{
	srcu_expedite_current(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.down_read = srcu_torture_down_read,
	.up_read = srcu_torture_up_read,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.exp_current = srcu_torture_expedite_current,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
		? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN,
	.name = "srcu"
};

static void srcud_torture_init(void)
{
	rcu_sync_torture_init();
	if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL)) {
		WARN_ON(init_srcu_struct(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init normal SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		WARN_ON(init_srcu_struct(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init NMI-safe SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		WARN_ON(init_srcu_struct_fast(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init fast SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		WARN_ON(init_srcu_struct_fast_updown(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init fast-up/down SRCU");
	} else {
		WARN_ON(init_srcu_struct(&srcu_ctld));
	}
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
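
/*
 * The "srcud" variant below differs from "srcu" only in that its
 * srcu_struct is initialized at runtime by init_srcu_struct() (or one
 * of its flavor-specific variants) and torn down by
 * cleanup_srcu_struct(), rather than being statically defined, so that
 * both initialization paths get exercised.
 */
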
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcud_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.down_read = srcu_torture_down_read,
	.up_read = srcu_torture_up_read,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.exp_current = srcu_torture_expedite_current,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
		? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not work well with CPU hotplug nor
 * with rcutorture's shuffling.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
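
/*
 * Why the affinity loop above constitutes a grace period: trivial
 * "readers" simply disable preemption (see
 * rcu_torture_read_lock_trivial() below), so once this thread has run
 * on a given CPU, that CPU has context-switched and therefore cannot
 * still be executing any pre-existing reader.
 */
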
static void rcu_sync_torture_init_trivial(void)
{
	rcu_sync_torture_init();
	// if (onoff_interval || shuffle_interval) {
	if (WARN_ONCE(onoff_interval || shuffle_interval,
		      "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero",
		      __func__, onoff_interval, shuffle_interval)) {
		onoff_interval = 0;
		shuffle_interval = 0;
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init_trivial,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.irq_capable = 1,
	.name = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.get_gp_data = rcu_tasks_get_gp_data,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.get_gp_data = rcu_tasks_rude_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
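
/*
 * The TASKS_OPS, TASKS_RUDE_OPS, and TASKS_TRACING_OPS macros (note the
 * trailing commas) expand either to a pointer plus comma or to nothing,
 * so that the ops-selection array later in this file (not shown in this
 * excerpt) can list them unconditionally while only the configured
 * flavors contribute entries.
 */
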
#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
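
/*
 * Failure detection in rcu_torture_boost_failed() below: a grace period
 * is charged as a boost failure only after running for
 * test_boost_duration seconds (less half a second of slop) without its
 * polled cookie completing, and only if the flavor's optional
 * ->check_boost_failed() hook, when present, confirms that some CPU has
 * yet to pass through a quiescent state.
 */
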
static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) &&
			    cmpxchg(&last_persist, lp, j) == lp) {
				if (cpu < 0)
					pr_info("Boost inversion persisted: QS from all CPUs\n");
				else
					pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			}
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");
		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;
	unsigned long booststarttime = get_torture_init_jiffies() + test_boost_holdoff * HZ;

	if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started");
	} else {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period");
		while (time_before(jiffies, booststarttime)) {
			schedule_timeout_idle(HZ);
			if (kthread_should_stop())
				goto cleanup;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period");
	}

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

cleanup:
	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
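
/*
 * Note for the machinery below: rcu_torture_write_types() records one
 * synctype[] entry per grace-period primitive that is both requested
 * and implemented by the current flavor, and the writer then samples
 * entries uniformly at random, so each eligible API gets roughly equal
 * coverage.  If no gp_* module parameter was set, all of them are
 * enabled by default.
 */
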
// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
	pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n",
		__func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp);
}

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}
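
/*
 * Note on do_rtws_sync() above: when the flavor's ->poll_need_2gp()
 * reports that a freshly captured cookie may need an additional grace
 * period before poll_gp_state() sees it as completed, an extra sync()
 * is issued first.  The cpus_read_lock() holds off CPU hotplug across
 * the cookie checks, which could otherwise enable single-CPU
 * grace-period optimizations that would confuse those checks.
 */
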
1603 if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu) 1604 stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * 1605 HZ * (stall_cpu_repeat + 1); 1606 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1607 if (!can_expedite) 1608 pr_alert("%s" TORTURE_FLAG 1609 " GP expediting controlled from boot/sysfs for %s.\n", 1610 torture_type, cur_ops->name); 1611 if (WARN_ONCE(nsynctypes == 0, 1612 "%s: No update-side primitives.\n", __func__)) { 1613 /* 1614 * No update-side primitives, so don't try updating. 1615 * The resulting test won't be testing much, hence the 1616 * above WARN_ONCE(). 1617 */ 1618 rcu_torture_writer_state = RTWS_STOPPING; 1619 torture_kthread_stopping("rcu_torture_writer"); 1620 return 0; 1621 } 1622 if (cur_ops->poll_active > 0) { 1623 ulo = kcalloc(cur_ops->poll_active, sizeof(*ulo), GFP_KERNEL); 1624 if (!WARN_ON(!ulo)) 1625 ulo_size = cur_ops->poll_active; 1626 } 1627 if (cur_ops->poll_active_full > 0) { 1628 rgo = kcalloc(cur_ops->poll_active_full, sizeof(*rgo), GFP_KERNEL); 1629 if (!WARN_ON(!rgo)) 1630 rgo_size = cur_ops->poll_active_full; 1631 } 1632 1633 // If the system is still booting, let it finish. 1634 j = jiffies; 1635 while (!torture_must_stop() && !rcu_inkernel_boot_has_ended()) { 1636 booting_still = true; 1637 schedule_timeout_interruptible(HZ); 1638 } 1639 if (booting_still) 1640 pr_alert("%s" TORTURE_FLAG " Waited %lu jiffies for boot to complete.\n", 1641 torture_type, jiffies - j); 1642 1643 do { 1644 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1645 torture_hrtimeout_us(500, 1000, &rand); 1646 rp = rcu_torture_alloc(); 1647 if (rp == NULL) 1648 continue; 1649 rp->rtort_pipe_count = 0; 1650 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); 1651 rcu_torture_writer_state = RTWS_DELAY; 1652 udelay(torture_random(&rand) & 0x3ff); 1653 rcu_torture_writer_state = RTWS_REPLACE; 1654 old_rp = rcu_dereference_check(rcu_torture_current, 1655 current == writer_task); 1656 rp->rtort_mbtest = 1; 1657 rcu_assign_pointer(rcu_torture_current, rp); 1658 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1659 if (old_rp) { 1660 i = old_rp->rtort_pipe_count; 1661 if (i > RCU_TORTURE_PIPE_LEN) 1662 i = RCU_TORTURE_PIPE_LEN; 1663 atomic_inc(&rcu_torture_wcount[i]); 1664 WRITE_ONCE(old_rp->rtort_pipe_count, 1665 old_rp->rtort_pipe_count + 1); 1666 ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count); 1667 1668 // Make sure readers block polled grace periods.
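	// That is, a cookie collected via ->get_gp_state() (for example,
	// get_state_synchronize_rcu() for vanilla RCU) from within a reader
	// must not be reported as completed by ->poll_gp_state() while that
	// reader is still in its critical section. In contrast, a
	// ->get_comp_state() cookie denotes an already-completed grace
	// period and so must always poll as completed. Both properties are
	// checked below.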
if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1670 idx = cur_ops->readlock(); 1671 cookie = cur_ops->get_gp_state(); 1672 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1673 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1674 __func__, 1675 rcu_torture_writer_state_getname(), 1676 rcu_torture_writer_state, 1677 cookie, cur_ops->get_gp_state()); 1678 if (cur_ops->get_comp_state) { 1679 cookie = cur_ops->get_comp_state(); 1680 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1681 } 1682 cur_ops->readunlock(idx); 1683 } 1684 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1685 idx = cur_ops->readlock(); 1686 cur_ops->get_gp_state_full(&cookie_full); 1687 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1688 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1689 __func__, 1690 rcu_torture_writer_state_getname(), 1691 rcu_torture_writer_state, 1692 cpumask_pr_args(cpu_online_mask)); 1693 if (cur_ops->get_comp_state_full) { 1694 cur_ops->get_comp_state_full(&cookie_full); 1695 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1696 } 1697 cur_ops->readunlock(idx); 1698 } 1699 switch (synctype[torture_random(&rand) % nsynctypes]) { 1700 case RTWS_DEF_FREE: 1701 rcu_torture_writer_state = RTWS_DEF_FREE; 1702 cur_ops->deferred_free(old_rp); 1703 break; 1704 case RTWS_EXP_SYNC: 1705 rcu_torture_writer_state = RTWS_EXP_SYNC; 1706 do_rtws_sync(&rand, cur_ops->exp_sync); 1707 rcu_torture_pipe_update(old_rp); 1708 break; 1709 case RTWS_COND_GET: 1710 rcu_torture_writer_state = RTWS_COND_GET; 1711 gp_snap = cur_ops->get_gp_state(); 1712 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi, 1713 1000, &rand); 1714 rcu_torture_writer_state = RTWS_COND_SYNC; 1715 cur_ops->cond_sync(gp_snap); 1716 rcu_torture_pipe_update(old_rp); 1717 break; 1718 case RTWS_COND_GET_EXP: 1719 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1720 gp_snap = cur_ops->get_gp_state_exp(); 1721 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp, 1722 1000, &rand); 1723 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1724 cur_ops->cond_sync_exp(gp_snap); 1725 rcu_torture_pipe_update(old_rp); 1726 break; 1727 case RTWS_COND_GET_FULL: 1728 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1729 cur_ops->get_gp_state_full(&gp_snap_full); 1730 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi, 1731 1000, &rand); 1732 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1733 cur_ops->cond_sync_full(&gp_snap_full); 1734 rcu_torture_pipe_update(old_rp); 1735 break; 1736 case RTWS_COND_GET_EXP_FULL: 1737 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1738 cur_ops->get_gp_state_full(&gp_snap_full); 1739 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp, 1740 1000, &rand); 1741 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1742 cur_ops->cond_sync_exp_full(&gp_snap_full); 1743 rcu_torture_pipe_update(old_rp); 1744 break; 1745 case RTWS_POLL_GET: 1746 rcu_torture_writer_state = RTWS_POLL_GET; 1747 for (i = 0; i < ulo_size; i++) 1748 ulo[i] = cur_ops->get_comp_state(); 1749 gp_snap = cur_ops->start_gp_poll(); 1750 rcu_torture_writer_state = RTWS_POLL_WAIT; 1751 if (cur_ops->exp_current && !(torture_random(&rand) % 0xff)) 1752 cur_ops->exp_current(); 1753 while (!cur_ops->poll_gp_state(gp_snap)) { 1754 gp_snap1 = cur_ops->get_gp_state(); 1755 for (i = 0; i < ulo_size; i++) 1756 if (cur_ops->poll_gp_state(ulo[i]) || 1757 cur_ops->same_gp_state(ulo[i], gp_snap1)) { 1758 ulo[i] = gp_snap1; 1759 break; 1760 } 1761 WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size); 1762
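	// Randomized sleep (up to gp_poll_wi microseconds) between polls
	// lets the grace period make progress without this kthread
	// spinning on its CPU.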
torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1763 1000, &rand); 1764 } 1765 rcu_torture_pipe_update(old_rp); 1766 break; 1767 case RTWS_POLL_GET_FULL: 1768 rcu_torture_writer_state = RTWS_POLL_GET_FULL; 1769 for (i = 0; i < rgo_size; i++) 1770 cur_ops->get_comp_state_full(&rgo[i]); 1771 cur_ops->start_gp_poll_full(&gp_snap_full); 1772 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1773 if (cur_ops->exp_current && !(torture_random(&rand) % 0xff)) 1774 cur_ops->exp_current(); 1775 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1776 cur_ops->get_gp_state_full(&gp_snap1_full); 1777 for (i = 0; i < rgo_size; i++) 1778 if (cur_ops->poll_gp_state_full(&rgo[i]) || 1779 cur_ops->same_gp_state_full(&rgo[i], 1780 &gp_snap1_full)) { 1781 rgo[i] = gp_snap1_full; 1782 break; 1783 } 1784 WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size); 1785 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1786 1000, &rand); 1787 } 1788 rcu_torture_pipe_update(old_rp); 1789 break; 1790 case RTWS_POLL_GET_EXP: 1791 rcu_torture_writer_state = RTWS_POLL_GET_EXP; 1792 gp_snap = cur_ops->start_gp_poll_exp(); 1793 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; 1794 while (!cur_ops->poll_gp_state_exp(gp_snap)) 1795 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1796 1000, &rand); 1797 rcu_torture_pipe_update(old_rp); 1798 break; 1799 case RTWS_POLL_GET_EXP_FULL: 1800 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; 1801 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1802 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; 1803 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1804 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1805 1000, &rand); 1806 rcu_torture_pipe_update(old_rp); 1807 break; 1808 case RTWS_SYNC: 1809 rcu_torture_writer_state = RTWS_SYNC; 1810 do_rtws_sync(&rand, cur_ops->sync); 1811 rcu_torture_pipe_update(old_rp); 1812 break; 1813 default: 1814 WARN_ON_ONCE(1); 1815 break; 1816 } 1817 } 1818 WRITE_ONCE(rcu_torture_current_version, 1819 rcu_torture_current_version + 1); 1820 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1821 if (can_expedite && 1822 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1823 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1824 if (expediting >= 0) 1825 rcu_expedite_gp(); 1826 else 1827 rcu_unexpedite_gp(); 1828 if (++expediting > 3) 1829 expediting = -expediting; 1830 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1831 can_expedite = !rcu_gp_is_expedited() && 1832 !rcu_gp_is_normal(); 1833 } 1834 rcu_torture_writer_state = RTWS_STUTTER; 1835 stutter_waited = stutter_wait("rcu_torture_writer"); 1836 if (stutter_waited && 1837 !atomic_read(&rcu_fwd_cb_nodelay) && 1838 !cur_ops->slow_gps && 1839 !torture_must_stop() && 1840 time_after(jiffies, stallsdone)) 1841 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1842 if (list_empty(&rcu_tortures[i].rtort_free) && 1843 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { 1844 tracing_off(); 1845 if (cur_ops->gp_kthread_dbg) 1846 cur_ops->gp_kthread_dbg(); 1847 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1848 rcu_ftrace_dump(DUMP_ALL); 1849 break; 1850 } 1851 if (stutter_waited) 1852 sched_set_normal(current, oldnice); 1853 } while (!torture_must_stop()); 1854 rcu_torture_current = NULL; // Let stats task know that we are done. 1855 /* Reset expediting back to unexpedited.
*/ 1856 if (expediting > 0) 1857 expediting = -expediting; 1858 while (can_expedite && expediting++ < 0) 1859 rcu_unexpedite_gp(); 1860 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1861 if (!can_expedite) 1862 pr_alert("%s" TORTURE_FLAG 1863 " Dynamic grace-period expediting was disabled.\n", 1864 torture_type); 1865 kfree(ulo); 1866 kfree(rgo); 1867 rcu_torture_writer_state = RTWS_STOPPING; 1868 torture_kthread_stopping("rcu_torture_writer"); 1869 return 0; 1870 } 1871 1872 /* 1873 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1874 * delay between calls. 1875 */ 1876 static int 1877 rcu_torture_fakewriter(void *arg) 1878 { 1879 unsigned long gp_snap; 1880 struct rcu_gp_oldstate gp_snap_full; 1881 DEFINE_TORTURE_RANDOM(rand); 1882 1883 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1884 set_user_nice(current, MAX_NICE); 1885 1886 if (WARN_ONCE(nsynctypes == 0, 1887 "%s: No update-side primitives.\n", __func__)) { 1888 /* 1889 * No update-side primitives, so don't try updating. 1890 * The resulting test won't be testing much, hence the 1891 * above WARN_ONCE(). 1892 */ 1893 torture_kthread_stopping("rcu_torture_fakewriter"); 1894 return 0; 1895 } 1896 1897 do { 1898 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1899 if (cur_ops->cb_barrier != NULL && 1900 torture_random(&rand) % (nrealfakewriters * 8) == 0) { 1901 cur_ops->cb_barrier(); 1902 } else { 1903 switch (synctype[torture_random(&rand) % nsynctypes]) { 1904 case RTWS_DEF_FREE: 1905 break; 1906 case RTWS_EXP_SYNC: 1907 cur_ops->exp_sync(); 1908 break; 1909 case RTWS_COND_GET: 1910 gp_snap = cur_ops->get_gp_state(); 1911 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1912 cur_ops->cond_sync(gp_snap); 1913 break; 1914 case RTWS_COND_GET_EXP: 1915 gp_snap = cur_ops->get_gp_state_exp(); 1916 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1917 cur_ops->cond_sync_exp(gp_snap); 1918 break; 1919 case RTWS_COND_GET_FULL: 1920 cur_ops->get_gp_state_full(&gp_snap_full); 1921 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1922 cur_ops->cond_sync_full(&gp_snap_full); 1923 break; 1924 case RTWS_COND_GET_EXP_FULL: 1925 cur_ops->get_gp_state_full(&gp_snap_full); 1926 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1927 cur_ops->cond_sync_exp_full(&gp_snap_full); 1928 break; 1929 case RTWS_POLL_GET: 1930 if (cur_ops->start_poll_irqsoff) 1931 local_irq_disable(); 1932 gp_snap = cur_ops->start_gp_poll(); 1933 if (cur_ops->start_poll_irqsoff) 1934 local_irq_enable(); 1935 while (!cur_ops->poll_gp_state(gp_snap)) { 1936 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1937 &rand); 1938 } 1939 break; 1940 case RTWS_POLL_GET_FULL: 1941 if (cur_ops->start_poll_irqsoff) 1942 local_irq_disable(); 1943 cur_ops->start_gp_poll_full(&gp_snap_full); 1944 if (cur_ops->start_poll_irqsoff) 1945 local_irq_enable(); 1946 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1947 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1948 &rand); 1949 } 1950 break; 1951 case RTWS_POLL_GET_EXP: 1952 gp_snap = cur_ops->start_gp_poll_exp(); 1953 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1954 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1955 &rand); 1956 } 1957 break; 1958 case RTWS_POLL_GET_EXP_FULL: 1959 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1960 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1961 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1962 &rand); 1963 } 1964 break; 1965 case RTWS_SYNC: 1966
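				// Plain synchronous grace-period wait, for
				// example, synchronize_rcu() for the vanilla
				// RCU flavor.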
cur_ops->sync(); 1967 break; 1968 default: 1969 WARN_ON_ONCE(1); 1970 break; 1971 } 1972 } 1973 stutter_wait("rcu_torture_fakewriter"); 1974 } while (!torture_must_stop()); 1975 1976 torture_kthread_stopping("rcu_torture_fakewriter"); 1977 return 0; 1978 } 1979 1980 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1981 { 1982 kfree(rhp); 1983 } 1984 1985 // Set up and carry out testing of RCU's global memory ordering 1986 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1987 struct torture_random_state *trsp) 1988 { 1989 unsigned long loops; 1990 int noc = torture_num_online_cpus(); 1991 int rdrchked; 1992 int rdrchker; 1993 struct rcu_torture_reader_check *rtrcp; // Me. 1994 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1995 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1996 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1997 1998 if (myid < 0) 1999 return; // Don't try this from timer handlers. 2000 2001 // Increment my counter. 2002 rtrcp = &rcu_torture_reader_mbchk[myid]; 2003 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 2004 2005 // Attempt to assign someone else some checking work. 2006 rdrchked = torture_random(trsp) % nrealreaders; 2007 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 2008 rdrchker = torture_random(trsp) % nrealreaders; 2009 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 2010 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 2011 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 2012 !READ_ONCE(rtp->rtort_chkp) && 2013 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 2014 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 2015 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 2016 rtrcp->rtc_chkrdr = rdrchked; 2017 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 2018 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 2019 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 2020 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 2021 } 2022 2023 // If assigned some completed work, do it! 2024 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 2025 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 2026 return; // No work or work not yet ready. 2027 rdrchked = rtrcp_assigner->rtc_chkrdr; 2028 if (WARN_ON_ONCE(rdrchked < 0)) 2029 return; 2030 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 2031 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 2032 atomic_inc(&n_rcu_torture_mbchk_tries); 2033 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 2034 atomic_inc(&n_rcu_torture_mbchk_fail); 2035 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 2036 rtrcp_assigner->rtc_ready = 0; 2037 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 2038 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 2039 } 2040 2041 // Verify the specified RCUTORTURE_RDR* state. 
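// That is, check that the protections claimed by the reader-state bits
// (BH disabled, interrupts disabled, preemption disabled, RCU readers
// nested) are in fact reflected by preempt_count() and the IRQ state,
// and WARN_ONCE() otherwise. Checks that would be unreliable in the
// current kernel configuration or execution context are skipped.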
2042 #define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count() 2043 static void rcutorture_one_extend_check(char *s, int curstate, int new, int old) 2044 { 2045 int mask; 2046 2047 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE) || in_nmi()) 2048 return; 2049 2050 WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled() && !in_hardirq(), ROEC_ARGS); 2051 WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS); 2052 2053 // If CONFIG_PREEMPT_COUNT=n, further checks are unreliable. 2054 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 2055 return; 2056 2057 WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 2058 !softirq_count(), ROEC_ARGS); 2059 WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) && 2060 !(preempt_count() & PREEMPT_MASK), ROEC_ARGS); 2061 WARN_ONCE(cur_ops->readlock_nesting && 2062 (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) && 2063 cur_ops->readlock_nesting() == 0, ROEC_ARGS); 2064 2065 // Interrupt handlers have all sorts of stuff disabled, so ignore 2066 // unintended disabling. 2067 if (in_serving_softirq() || in_hardirq()) 2068 return; 2069 2070 WARN_ONCE(cur_ops->extendables && 2071 !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 2072 softirq_count(), ROEC_ARGS); 2073 2074 /* 2075 * non-preemptible RCU in a preemptible kernel uses preempt_disable() 2076 * as rcu_read_lock(). 2077 */ 2078 mask = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2079 if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) 2080 mask |= RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2081 2082 WARN_ONCE(cur_ops->extendables && !(curstate & mask) && 2083 (preempt_count() & PREEMPT_MASK), ROEC_ARGS); 2084 2085 /* 2086 * non-preemptible RCU in a preemptible kernel uses "preempt_count() & 2087 * PREEMPT_MASK" as ->readlock_nesting(). 2088 */ 2089 mask = RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2090 if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) 2091 mask |= RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2092 2093 if (IS_ENABLED(CONFIG_PREEMPT_RT) && softirq_count()) 2094 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 2095 2096 WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) && 2097 cur_ops->readlock_nesting() > 0, ROEC_ARGS); 2098 } 2099 2100 /* 2101 * Do one extension of an RCU read-side critical section using the 2102 * current reader state in readstate (set to zero for initial entry 2103 * to extended critical section), set the new state as specified by 2104 * newstate (set to zero for final exit from extended critical section), 2105 * and random-number-generator state in trsp. If this is neither the 2106 * beginning nor the end of the critical section and if there was actually a 2107 * change, do a ->read_delay(). 2108 */ 2109 static void rcutorture_one_extend(int *readstate, int newstate, struct torture_random_state *trsp, 2110 struct rt_read_seg *rtrsp) 2111 { 2112 bool first; 2113 unsigned long flags; 2114 int idxnew1 = -1; 2115 int idxnew2 = -1; 2116 int idxold1 = *readstate; 2117 int idxold2 = idxold1; 2118 int statesnew = ~*readstate & newstate; 2119 int statesold = *readstate & ~newstate; 2120 2121 first = idxold1 == 0; 2122 WARN_ON_ONCE(idxold2 < 0); 2123 WARN_ON_ONCE(idxold2 & ~(RCUTORTURE_RDR_ALLBITS | RCUTORTURE_RDR_UPDOWN)); 2124 rcutorture_one_extend_check("before change", idxold1, statesnew, statesold); 2125 rtrsp->rt_readstate = newstate; 2126 2127 /* First, put new protection in place to avoid critical-section gap.
*/ 2128 if (statesnew & RCUTORTURE_RDR_BH) 2129 local_bh_disable(); 2130 if (statesnew & RCUTORTURE_RDR_RBH) 2131 rcu_read_lock_bh(); 2132 if (statesnew & RCUTORTURE_RDR_IRQ) 2133 local_irq_disable(); 2134 if (statesnew & RCUTORTURE_RDR_PREEMPT) 2135 preempt_disable(); 2136 if (statesnew & RCUTORTURE_RDR_SCHED) 2137 rcu_read_lock_sched(); 2138 if (statesnew & RCUTORTURE_RDR_RCU_1) 2139 idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; 2140 if (statesnew & RCUTORTURE_RDR_RCU_2) 2141 idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2; 2142 2143 // Complain unless both the old and the new protection are in place. 2144 rcutorture_one_extend_check("during change", idxold1 | statesnew, statesnew, statesold); 2145 2146 // Sample CPU under both sets of protections to reduce confusion. 2147 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 2148 int cpu = raw_smp_processor_id(); 2149 rtrsp->rt_cpu = cpu; 2150 if (!first) { 2151 rtrsp[-1].rt_end_cpu = cpu; 2152 if (cur_ops->reader_blocked) 2153 rtrsp[-1].rt_preempted = cur_ops->reader_blocked(); 2154 } 2155 } 2156 // Sample grace-period sequence number, as good a place as any. 2157 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && cur_ops->gather_gp_seqs) { 2158 rtrsp->rt_gp_seq = cur_ops->gather_gp_seqs(); 2159 rtrsp->rt_ts = ktime_get_mono_fast_ns(); 2160 if (!first) 2161 rtrsp[-1].rt_gp_seq_end = rtrsp->rt_gp_seq; 2162 } 2163 2164 /* 2165 * Next, remove old protection, in decreasing order of strength 2166 * to avoid unlock paths that aren't safe in the stronger 2167 * context. Namely: BH can not be enabled with disabled interrupts. 2168 * Additionally PREEMPT_RT requires that BH is enabled in preemptible 2169 * context. 2170 */ 2171 if (statesold & RCUTORTURE_RDR_IRQ) 2172 local_irq_enable(); 2173 if (statesold & RCUTORTURE_RDR_PREEMPT) 2174 preempt_enable(); 2175 if (statesold & RCUTORTURE_RDR_SCHED) 2176 rcu_read_unlock_sched(); 2177 if (statesold & RCUTORTURE_RDR_BH) 2178 local_bh_enable(); 2179 if (statesold & RCUTORTURE_RDR_RBH) 2180 rcu_read_unlock_bh(); 2181 if (statesold & RCUTORTURE_RDR_RCU_2) { 2182 cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2); 2183 WARN_ON_ONCE(idxnew2 != -1); 2184 idxold2 = 0; 2185 } 2186 if (statesold & RCUTORTURE_RDR_RCU_1) { 2187 bool lockit; 2188 2189 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 2190 if (lockit) 2191 raw_spin_lock_irqsave(&current->pi_lock, flags); 2192 cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); 2193 WARN_ON_ONCE(idxnew1 != -1); 2194 idxold1 = 0; 2195 if (lockit) 2196 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 2197 } 2198 if (statesold & RCUTORTURE_RDR_UPDOWN) { 2199 cur_ops->up_read((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); 2200 WARN_ON_ONCE(idxnew1 != -1); 2201 idxold1 = 0; 2202 } 2203 2204 /* Delay if neither beginning nor end and there was a change. */ 2205 if ((statesnew || statesold) && *readstate && newstate) 2206 cur_ops->read_delay(trsp, rtrsp); 2207 2208 /* Update the reader state.
*/ 2209 if (idxnew1 == -1) 2210 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 2211 WARN_ON_ONCE(idxnew1 < 0); 2212 if (idxnew2 == -1) 2213 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 2214 WARN_ON_ONCE(idxnew2 < 0); 2215 *readstate = idxnew1 | idxnew2 | newstate; 2216 WARN_ON_ONCE(*readstate < 0); 2217 if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS)) 2218 pr_info("Unexpected readstate value of %#x\n", *readstate); 2219 rcutorture_one_extend_check("after change", *readstate, statesnew, statesold); 2220 } 2221 2222 /* Return the biggest extendables mask given current RCU and boot parameters. */ 2223 static int rcutorture_extend_mask_max(void) 2224 { 2225 int mask; 2226 2227 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 2228 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 2229 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2230 return mask; 2231 } 2232 2233 /* Return a random protection state mask, but with at least one bit set. */ 2234 static int 2235 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 2236 { 2237 int mask = rcutorture_extend_mask_max(); 2238 unsigned long randmask1 = torture_random(trsp); 2239 unsigned long randmask2 = randmask1 >> 3; 2240 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2241 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 2242 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 2243 2244 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits. 2245 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 2246 if (!(randmask1 & 0x7)) 2247 mask = mask & randmask2; 2248 else 2249 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 2250 2251 // Can't have nested RCU reader without outer RCU reader. 2252 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 2253 if (oldmask & RCUTORTURE_RDR_RCU_1) 2254 mask &= ~RCUTORTURE_RDR_RCU_2; 2255 else 2256 mask |= RCUTORTURE_RDR_RCU_1; 2257 } 2258 2259 /* 2260 * Can't enable bh w/irq disabled. 2261 */ 2262 if (mask & RCUTORTURE_RDR_IRQ) 2263 mask |= oldmask & bhs; 2264 2265 /* 2266 * Ideally these sequences would be detected in debug builds 2267 * (regardless of RT), but until then don't stop testing 2268 * them on non-RT. 2269 */ 2270 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 2271 /* Can't modify BH in atomic context */ 2272 if (oldmask & preempts_irq) 2273 mask &= ~bhs; 2274 if ((oldmask | mask) & preempts_irq) 2275 mask |= oldmask & bhs; 2276 } 2277 2278 return mask ?: RCUTORTURE_RDR_RCU_1; 2279 } 2280 2281 /* 2282 * Do a randomly selected number of extensions of an existing RCU read-side 2283 * critical section. 2284 */ 2285 static struct rt_read_seg * 2286 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, struct rt_read_seg *rtrsp) 2287 { 2288 int i; 2289 int j; 2290 int mask = rcutorture_extend_mask_max(); 2291 2292 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 2293 if (!((mask - 1) & mask)) 2294 return rtrsp; /* Current RCU reader not extendable. */ 2295 /* Bias towards larger numbers of loops. 
*/ 2296 i = torture_random(trsp); 2297 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 2298 for (j = 0; j < i; j++) { 2299 mask = rcutorture_extend_mask(*readstate, trsp); 2300 WARN_ON_ONCE(mask & RCUTORTURE_RDR_UPDOWN); 2301 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 2302 } 2303 return &rtrsp[j]; 2304 } 2305 2306 struct rcu_torture_one_read_state { 2307 bool checkpolling; 2308 unsigned long cookie; 2309 struct rcu_gp_oldstate cookie_full; 2310 unsigned long started; 2311 struct rcu_torture *p; 2312 int readstate; 2313 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS]; 2314 struct rt_read_seg *rtrsp; 2315 unsigned long long ts; 2316 }; 2317 2318 static void init_rcu_torture_one_read_state(struct rcu_torture_one_read_state *rtorsp, 2319 struct torture_random_state *trsp) 2320 { 2321 memset(rtorsp, 0, sizeof(*rtorsp)); 2322 rtorsp->checkpolling = !(torture_random(trsp) & 0xfff); 2323 rtorsp->rtrsp = &rtorsp->rtseg[0]; 2324 } 2325 2326 /* 2327 * Set up the first segment of a series of overlapping read-side 2328 * critical sections. The caller must have actually initiated the 2329 * outermost read-side critical section. 2330 */ 2331 static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp, 2332 struct torture_random_state *trsp, long myid) 2333 { 2334 if (rtorsp->checkpolling) { 2335 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2336 rtorsp->cookie = cur_ops->get_gp_state(); 2337 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2338 cur_ops->get_gp_state_full(&rtorsp->cookie_full); 2339 } 2340 rtorsp->started = cur_ops->get_gp_seq(); 2341 rtorsp->ts = rcu_trace_clock_local(); 2342 rtorsp->p = rcu_dereference_check(rcu_torture_current, 2343 !cur_ops->readlock_held || cur_ops->readlock_held() || 2344 (rtorsp->readstate & RCUTORTURE_RDR_UPDOWN)); 2345 if (rtorsp->p == NULL) { 2346 /* Wait for rcu_torture_writer to get underway */ 2347 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp); 2348 return false; 2349 } 2350 if (rtorsp->p->rtort_mbtest == 0) 2351 atomic_inc(&n_rcu_torture_mberror); 2352 rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp); 2353 return true; 2354 } 2355 2356 /* 2357 * Complete the last segment of a series of overlapping read-side 2358 * critical sections and check for errors. 2359 */ 2360 static void rcu_torture_one_read_end(struct rcu_torture_one_read_state *rtorsp, 2361 struct torture_random_state *trsp) 2362 { 2363 int i; 2364 unsigned long completed; 2365 int pipe_count; 2366 bool preempted = false; 2367 struct rt_read_seg *rtrsp1; 2368 2369 preempt_disable(); 2370 pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count); 2371 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2372 // Should not happen in a correct RCU implementation, 2373 // happens quite often for torture_type=busted. 2374 pipe_count = RCU_TORTURE_PIPE_LEN; 2375 } 2376 completed = cur_ops->get_gp_seq(); 2377 if (pipe_count > 1) { 2378 do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu, 2379 rtorsp->ts, rtorsp->started, completed); 2380 rcu_ftrace_dump(DUMP_ALL); 2381 } 2382 __this_cpu_inc(rcu_torture_count[pipe_count]); 2383 completed = rcutorture_seq_diff(completed, rtorsp->started); 2384 if (completed > RCU_TORTURE_PIPE_LEN) { 2385 /* Should not happen, but... 
*/ 2386 completed = RCU_TORTURE_PIPE_LEN; 2387 } 2388 __this_cpu_inc(rcu_torture_batch[completed]); 2389 preempt_enable(); 2390 if (rtorsp->checkpolling) { 2391 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2392 WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie), 2393 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2394 __func__, 2395 rcu_torture_writer_state_getname(), 2396 rcu_torture_writer_state, 2397 rtorsp->cookie, cur_ops->get_gp_state()); 2398 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2399 WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full), 2400 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2401 __func__, 2402 rcu_torture_writer_state_getname(), 2403 rcu_torture_writer_state, 2404 cpumask_pr_args(cpu_online_mask)); 2405 } 2406 if (cur_ops->reader_blocked) 2407 preempted = cur_ops->reader_blocked(); 2408 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp); 2409 WARN_ON_ONCE(rtorsp->readstate); 2410 // This next splat is expected behavior if leakpointer, especially 2411 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2412 WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1); 2413 2414 /* If error or close call, record the sequence of reader protections. */ 2415 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2416 i = 0; 2417 for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++) 2418 err_segs[i++] = *rtrsp1; 2419 rt_read_nsegs = i; 2420 rt_read_preempted = preempted; 2421 } 2422 } 2423 2424 /* 2425 * Do one read-side critical section, returning false if there was 2426 * no data to read. Can be invoked both from process context and 2427 * from a timer handler. 2428 */ 2429 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 2430 { 2431 int newstate; 2432 struct rcu_torture_one_read_state rtors; 2433 2434 WARN_ON_ONCE(!rcu_is_watching()); 2435 init_rcu_torture_one_read_state(&rtors, trsp); 2436 newstate = rcutorture_extend_mask(rtors.readstate, trsp); 2437 WARN_ON_ONCE(newstate & RCUTORTURE_RDR_UPDOWN); 2438 rcutorture_one_extend(&rtors.readstate, newstate, trsp, rtors.rtrsp++); 2439 if (!rcu_torture_one_read_start(&rtors, trsp, myid)) 2440 return false; 2441 rtors.rtrsp = rcutorture_loop_extend(&rtors.readstate, trsp, rtors.rtrsp); 2442 rcu_torture_one_read_end(&rtors, trsp); 2443 return true; 2444 } 2445 2446 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2447 2448 /* 2449 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2450 * incrementing the corresponding element of the pipeline array. The 2451 * counter in the element should never be greater than 1, otherwise, the 2452 * RCU implementation is broken. 2453 */ 2454 static void rcu_torture_timer(struct timer_list *unused) 2455 { 2456 atomic_long_inc(&n_rcu_torture_timers); 2457 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2458 2459 /* Test call_rcu() invocation from interrupt handler. */ 2460 if (cur_ops->call) { 2461 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2462 2463 if (rhp) 2464 cur_ops->call(rhp, rcu_torture_timer_cb); 2465 } 2466 } 2467 2468 /* 2469 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2470 * incrementing the corresponding element of the pipeline array. The 2471 * counter in the element should never be greater than 1, otherwise, the 2472 * RCU implementation is broken. 
2473 */ 2474 static int 2475 rcu_torture_reader(void *arg) 2476 { 2477 unsigned long lastsleep = jiffies; 2478 long myid = (long)arg; 2479 int mynumonline = myid; 2480 DEFINE_TORTURE_RANDOM(rand); 2481 struct timer_list t; 2482 2483 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2484 set_user_nice(current, MAX_NICE); 2485 if (irqreader && cur_ops->irq_capable) 2486 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2487 tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. 2488 do { 2489 if (irqreader && cur_ops->irq_capable) { 2490 if (!timer_pending(&t)) 2491 mod_timer(&t, jiffies + 1); 2492 } 2493 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2494 schedule_timeout_interruptible(HZ); 2495 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2496 torture_hrtimeout_us(500, 1000, &rand); 2497 lastsleep = jiffies + 10; 2498 } 2499 while (!torture_must_stop() && 2500 (torture_num_online_cpus() < mynumonline || !rcu_inkernel_boot_has_ended())) 2501 schedule_timeout_interruptible(HZ / 5); 2502 stutter_wait("rcu_torture_reader"); 2503 } while (!torture_must_stop()); 2504 if (irqreader && cur_ops->irq_capable) { 2505 timer_delete_sync(&t); 2506 timer_destroy_on_stack(&t); 2507 } 2508 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2509 torture_kthread_stopping("rcu_torture_reader"); 2510 return 0; 2511 } 2512 2513 struct rcu_torture_one_read_state_updown { 2514 struct hrtimer rtorsu_hrt; 2515 bool rtorsu_inuse; 2516 ktime_t rtorsu_kt; 2517 int rtorsu_cpu; 2518 unsigned long rtorsu_j; 2519 unsigned long rtorsu_ndowns; 2520 unsigned long rtorsu_nups; 2521 unsigned long rtorsu_nmigrates; 2522 struct torture_random_state rtorsu_trs; 2523 struct rcu_torture_one_read_state rtorsu_rtors; 2524 }; 2525 2526 static struct rcu_torture_one_read_state_updown *updownreaders; 2527 static DEFINE_TORTURE_RANDOM(rcu_torture_updown_rand); 2528 static int rcu_torture_updown(void *arg); 2529 2530 static enum hrtimer_restart rcu_torture_updown_hrt(struct hrtimer *hrtp) 2531 { 2532 int cpu = raw_smp_processor_id(); 2533 struct rcu_torture_one_read_state_updown *rtorsup; 2534 2535 rtorsup = container_of(hrtp, struct rcu_torture_one_read_state_updown, rtorsu_hrt); 2536 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2537 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2538 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2539 WRITE_ONCE(rtorsup->rtorsu_nmigrates, 2540 rtorsup->rtorsu_nmigrates + (cpu != rtorsup->rtorsu_cpu)); 2541 smp_store_release(&rtorsup->rtorsu_inuse, false); 2542 return HRTIMER_NORESTART; 2543 } 2544 2545 static int rcu_torture_updown_init(void) 2546 { 2547 int i; 2548 struct torture_random_state *rand = &rcu_torture_updown_rand; 2549 int ret; 2550 2551 if (n_up_down < 0) 2552 return 0; 2553 if (!srcu_torture_have_up_down()) { 2554 VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives"); 2555 return 0; 2556 } 2557 updownreaders = kcalloc(n_up_down, sizeof(*updownreaders), GFP_KERNEL); 2558 if (!updownreaders) { 2559 VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests"); 2560 return -ENOMEM; 2561 } 2562 for (i = 0; i < n_up_down; i++) { 2563 init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, rand); 2564 hrtimer_setup(&updownreaders[i].rtorsu_hrt, rcu_torture_updown_hrt, CLOCK_MONOTONIC, 2565 HRTIMER_MODE_REL | 
HRTIMER_MODE_HARD); 2566 torture_random_init(&updownreaders[i].rtorsu_trs); 2567 init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, 2568 &updownreaders[i].rtorsu_trs); 2569 } 2570 ret = torture_create_kthread(rcu_torture_updown, rand, updown_task); 2571 if (ret) { 2572 kfree(updownreaders); 2573 updownreaders = NULL; 2574 } 2575 return ret; 2576 } 2577 2578 static void rcu_torture_updown_cleanup(void) 2579 { 2580 struct rcu_torture_one_read_state_updown *rtorsup; 2581 2582 for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) { 2583 if (!smp_load_acquire(&rtorsup->rtorsu_inuse)) 2584 continue; 2585 if (hrtimer_cancel(&rtorsup->rtorsu_hrt) || WARN_ON_ONCE(rtorsup->rtorsu_inuse)) { 2586 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2587 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2588 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2589 smp_store_release(&rtorsup->rtorsu_inuse, false); 2590 } 2591 2592 } 2593 kfree(updownreaders); 2594 updownreaders = NULL; 2595 } 2596 2597 // Do one reader for rcu_torture_updown(). 2598 static void rcu_torture_updown_one(struct rcu_torture_one_read_state_updown *rtorsup) 2599 { 2600 int idx; 2601 int rawidx; 2602 ktime_t t; 2603 2604 init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2605 rawidx = cur_ops->down_read(); 2606 WRITE_ONCE(rtorsup->rtorsu_ndowns, rtorsup->rtorsu_ndowns + 1); 2607 idx = (rawidx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; 2608 rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN; 2609 rtorsup->rtorsu_rtors.rtrsp++; 2610 rtorsup->rtorsu_cpu = raw_smp_processor_id(); 2611 if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1)) { 2612 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2613 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2614 schedule_timeout_idle(HZ); 2615 return; 2616 } 2617 smp_store_release(&rtorsup->rtorsu_inuse, true); 2618 t = torture_random(&rtorsup->rtorsu_trs) & 0xfffff; // One per million. 2619 if (t < 10 * 1000) 2620 t = 200 * 1000 * 1000; 2621 hrtimer_start(&rtorsup->rtorsu_hrt, t, HRTIMER_MODE_REL | HRTIMER_MODE_HARD); 2622 smp_mb(); // Sample jiffies after posting hrtimer. 2623 rtorsup->rtorsu_j = jiffies; // Not used by hrtimer handler. 2624 rtorsup->rtorsu_kt = t; 2625 } 2626 2627 /* 2628 * RCU torture up/down reader kthread, starting RCU readers in kthread 2629 * context and ending them in hrtimer handlers. Otherwise similar to 2630 * rcu_torture_reader(). 2631 */ 2632 static int 2633 rcu_torture_updown(void *arg) 2634 { 2635 unsigned long j; 2636 struct rcu_torture_one_read_state_updown *rtorsup; 2637 2638 VERBOSE_TOROUT_STRING("rcu_torture_updown task started"); 2639 do { 2640 for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) { 2641 if (torture_must_stop()) 2642 break; 2643 j = smp_load_acquire(&jiffies); // Time before ->rtorsu_inuse. 
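			// Sampling jiffies before ->rtorsu_inuse means that j
			// cannot overstate how long the hrtimer has been
			// pending, so the latency splat below errs on the
			// side of not firing.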
2644 if (smp_load_acquire(&rtorsup->rtorsu_inuse)) { 2645 WARN_ONCE(time_after(j, rtorsup->rtorsu_j + 1 + HZ * 10), 2646 "hrtimer queued at jiffies %lu for %lld ns took %lu jiffies\n", rtorsup->rtorsu_j, rtorsup->rtorsu_kt, j - rtorsup->rtorsu_j); 2647 continue; 2648 } 2649 rcu_torture_updown_one(rtorsup); 2650 } 2651 torture_hrtimeout_ms(1, 1000, &rcu_torture_updown_rand); 2652 stutter_wait("rcu_torture_updown"); 2653 } while (!torture_must_stop()); 2654 rcu_torture_updown_cleanup(); 2655 torture_kthread_stopping("rcu_torture_updown"); 2656 return 0; 2657 } 2658 2659 /* 2660 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2661 * increase race probabilities and fuzzes the interval between toggling. 2662 */ 2663 static int rcu_nocb_toggle(void *arg) 2664 { 2665 int cpu; 2666 int maxcpu = -1; 2667 int oldnice = task_nice(current); 2668 long r; 2669 DEFINE_TORTURE_RANDOM(rand); 2670 ktime_t toggle_delay; 2671 unsigned long toggle_fuzz; 2672 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2673 2674 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2675 while (!rcu_inkernel_boot_has_ended()) 2676 schedule_timeout_interruptible(HZ / 10); 2677 for_each_possible_cpu(cpu) 2678 maxcpu = cpu; 2679 WARN_ON(maxcpu < 0); 2680 if (toggle_interval > ULONG_MAX) 2681 toggle_fuzz = ULONG_MAX >> 3; 2682 else 2683 toggle_fuzz = toggle_interval >> 3; 2684 if (toggle_fuzz <= 0) 2685 toggle_fuzz = NSEC_PER_USEC; 2686 do { 2687 r = torture_random(&rand); 2688 cpu = (r >> 1) % (maxcpu + 1); 2689 if (r & 0x1) { 2690 rcu_nocb_cpu_offload(cpu); 2691 atomic_long_inc(&n_nocb_offload); 2692 } else { 2693 rcu_nocb_cpu_deoffload(cpu); 2694 atomic_long_inc(&n_nocb_deoffload); 2695 } 2696 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2697 set_current_state(TASK_INTERRUPTIBLE); 2698 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2699 if (stutter_wait("rcu_nocb_toggle")) 2700 sched_set_normal(current, oldnice); 2701 } while (!torture_must_stop()); 2702 torture_kthread_stopping("rcu_nocb_toggle"); 2703 return 0; 2704 } 2705 2706 /* 2707 * Print torture statistics. Caller must ensure that there is only 2708 * one call to this function at a given time!!! This is normally 2709 * accomplished by relying on the module system to only have one copy 2710 * of the module loaded, and then by giving the rcu_torture_stats 2711 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2712 * thread is not running). 
2713 */ 2714 static void 2715 rcu_torture_stats_print(void) 2716 { 2717 int cpu; 2718 int i; 2719 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2720 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2721 long n_gpwraps = 0; 2722 unsigned long ndowns = 0; 2723 unsigned long nunexpired = 0; 2724 unsigned long nmigrates = 0; 2725 unsigned long nups = 0; 2726 struct rcu_torture *rtcp; 2727 static unsigned long rtcv_snap = ULONG_MAX; 2728 static bool splatted; 2729 struct task_struct *wtp; 2730 2731 for_each_possible_cpu(cpu) { 2732 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2733 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2734 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2735 } 2736 if (cur_ops->get_gpwrap_count) 2737 n_gpwraps += cur_ops->get_gpwrap_count(cpu); 2738 } 2739 if (updownreaders) { 2740 for (i = 0; i < n_up_down; i++) { 2741 ndowns += READ_ONCE(updownreaders[i].rtorsu_ndowns); 2742 nups += READ_ONCE(updownreaders[i].rtorsu_nups); 2743 nunexpired += READ_ONCE(updownreaders[i].rtorsu_inuse); 2744 nmigrates += READ_ONCE(updownreaders[i].rtorsu_nmigrates); 2745 } 2746 } 2747 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2748 if (pipesummary[i] != 0) 2749 break; 2750 } // The value of variable "i" is used later, so don't clobber it! 2751 2752 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2753 rtcp = rcu_access_pointer(rcu_torture_current); 2754 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2755 rtcp, 2756 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", 2757 rcu_torture_current_version, 2758 list_empty(&rcu_torture_freelist), 2759 atomic_read(&n_rcu_torture_alloc), 2760 atomic_read(&n_rcu_torture_alloc_fail), 2761 atomic_read(&n_rcu_torture_free)); 2762 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2763 atomic_read(&n_rcu_torture_mberror), 2764 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2765 n_rcu_torture_barrier_error, 2766 n_rcu_torture_boost_ktrerror); 2767 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2768 n_rcu_torture_boost_failure, 2769 n_rcu_torture_boosts, 2770 atomic_long_read(&n_rcu_torture_timers)); 2771 if (updownreaders) 2772 pr_cont("ndowns: %lu nups: %lu nhrt: %lu nmigrates: %lu ", ndowns, nups, nunexpired, nmigrates); 2773 torture_onoff_stats(); 2774 pr_cont("barrier: %ld/%ld:%ld ", 2775 data_race(n_barrier_successes), 2776 data_race(n_barrier_attempts), 2777 data_race(n_rcu_torture_barrier_error)); 2778 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2779 pr_cont("nocb-toggles: %ld:%ld ", 2780 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2781 pr_cont("gpwraps: %ld\n", n_gpwraps); 2782 2783 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2784 if (atomic_read(&n_rcu_torture_mberror) || 2785 atomic_read(&n_rcu_torture_mbchk_fail) || 2786 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2787 n_rcu_torture_boost_failure || i > 1) { 2788 pr_cont("%s", "!!! "); 2789 atomic_inc(&n_rcu_torture_error); 2790 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2791 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2792 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2793 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2794 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 
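		// Recall that "i" is the index of the deepest non-empty
		// Reader Pipe bucket found above, so i > 1 means that some
		// reader saw an element survive more than one grace period,
		// that is, a too-short grace period.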
2795 WARN_ON_ONCE(i > 1); // Too-short grace period 2796 } 2797 pr_cont("Reader Pipe: "); 2798 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2799 pr_cont(" %ld", pipesummary[i]); 2800 pr_cont("\n"); 2801 2802 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2803 pr_cont("Reader Batch: "); 2804 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2805 pr_cont(" %ld", batchsummary[i]); 2806 pr_cont("\n"); 2807 2808 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2809 pr_cont("Free-Block Circulation: "); 2810 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2811 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2812 } 2813 pr_cont("\n"); 2814 2815 if (cur_ops->stats) 2816 cur_ops->stats(); 2817 if (rtcv_snap == rcu_torture_current_version && 2818 rcu_access_pointer(rcu_torture_current) && 2819 !rcu_stall_is_suppressed() && 2820 rcu_inkernel_boot_has_ended()) { 2821 int __maybe_unused flags = 0; 2822 unsigned long __maybe_unused gp_seq = 0; 2823 2824 if (cur_ops->get_gp_data) 2825 cur_ops->get_gp_data(&flags, &gp_seq); 2826 wtp = READ_ONCE(writer_task); 2827 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2828 rcu_torture_writer_state_getname(), 2829 rcu_torture_writer_state, gp_seq, flags, 2830 wtp == NULL ? ~0U : wtp->__state, 2831 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2832 if (!splatted && wtp) { 2833 sched_show_task(wtp); 2834 splatted = true; 2835 } 2836 if (cur_ops->gp_kthread_dbg) 2837 cur_ops->gp_kthread_dbg(); 2838 rcu_ftrace_dump(DUMP_ALL); 2839 } 2840 rtcv_snap = rcu_torture_current_version; 2841 } 2842 2843 /* 2844 * Periodically prints torture statistics, if periodic statistics printing 2845 * was specified via the stat_interval module parameter. 2846 */ 2847 static int 2848 rcu_torture_stats(void *arg) 2849 { 2850 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2851 do { 2852 schedule_timeout_interruptible(stat_interval * HZ); 2853 rcu_torture_stats_print(); 2854 torture_shutdown_absorb("rcu_torture_stats"); 2855 } while (!torture_must_stop()); 2856 torture_kthread_stopping("rcu_torture_stats"); 2857 return 0; 2858 } 2859 2860 /* Test mem_dump_obj() and friends. 
*/ 2861 static void rcu_torture_mem_dump_obj(void) 2862 { 2863 struct rcu_head *rhp; 2864 struct kmem_cache *kcp; 2865 static int z; 2866 2867 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2868 if (WARN_ON_ONCE(!kcp)) 2869 return; 2870 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2871 if (WARN_ON_ONCE(!rhp)) { 2872 kmem_cache_destroy(kcp); 2873 return; 2874 } 2875 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2876 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2877 mem_dump_obj(ZERO_SIZE_PTR); 2878 pr_alert("mem_dump_obj(NULL):"); 2879 mem_dump_obj(NULL); 2880 pr_alert("mem_dump_obj(%px):", &rhp); 2881 mem_dump_obj(&rhp); 2882 pr_alert("mem_dump_obj(%px):", rhp); 2883 mem_dump_obj(rhp); 2884 pr_alert("mem_dump_obj(%px):", &rhp->func); 2885 mem_dump_obj(&rhp->func); 2886 pr_alert("mem_dump_obj(%px):", &z); 2887 mem_dump_obj(&z); 2888 kmem_cache_free(kcp, rhp); 2889 kmem_cache_destroy(kcp); 2890 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2891 if (WARN_ON_ONCE(!rhp)) 2892 return; 2893 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2894 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2895 mem_dump_obj(rhp); 2896 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2897 mem_dump_obj(&rhp->func); 2898 kfree(rhp); 2899 rhp = vmalloc(4096); 2900 if (WARN_ON_ONCE(!rhp)) 2901 return; 2902 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2903 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2904 mem_dump_obj(rhp); 2905 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2906 mem_dump_obj(&rhp->func); 2907 vfree(rhp); 2908 } 2909 2910 static void 2911 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2912 { 2913 pr_alert("%s" TORTURE_FLAG 2914 "--- %s: nreaders=%d nfakewriters=%d " 2915 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2916 "shuffle_interval=%d stutter=%d irqreader=%d " 2917 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2918 "test_boost=%d/%d test_boost_interval=%d " 2919 "test_boost_duration=%d test_boost_holdoff=%d shutdown_secs=%d " 2920 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2921 "stall_cpu_block=%d stall_cpu_repeat=%d " 2922 "n_barrier_cbs=%d " 2923 "onoff_interval=%d onoff_holdoff=%d " 2924 "read_exit_delay=%d read_exit_burst=%d " 2925 "reader_flavor=%x " 2926 "nocbs_nthreads=%d nocbs_toggle=%d " 2927 "test_nmis=%d " 2928 "preempt_duration=%d preempt_interval=%d n_up_down=%d\n", 2929 torture_type, tag, nrealreaders, nrealfakewriters, 2930 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2931 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2932 test_boost, cur_ops->can_boost, 2933 test_boost_interval, test_boost_duration, test_boost_holdoff, shutdown_secs, 2934 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2935 stall_cpu_block, stall_cpu_repeat, 2936 n_barrier_cbs, 2937 onoff_interval, onoff_holdoff, 2938 read_exit_delay, read_exit_burst, 2939 reader_flavor, 2940 nocbs_nthreads, nocbs_toggle, 2941 test_nmis, 2942 preempt_duration, preempt_interval, n_up_down); 2943 } 2944 2945 static int rcutorture_booster_cleanup(unsigned int cpu) 2946 { 2947 struct task_struct *t; 2948 2949 if (boost_tasks[cpu] == NULL) 2950 return 0; 2951 mutex_lock(&boost_mutex); 2952 t = boost_tasks[cpu]; 2953 boost_tasks[cpu] = NULL; 2954 rcu_torture_enable_rt_throttle(); 2955 mutex_unlock(&boost_mutex); 2956 2957 /* 
This must be outside of the mutex, otherwise deadlock! */ 2958 torture_stop_kthread(rcu_torture_boost, t); 2959 return 0; 2960 } 2961 2962 static int rcutorture_booster_init(unsigned int cpu) 2963 { 2964 int retval; 2965 2966 if (boost_tasks[cpu] != NULL) 2967 return 0; /* Already created, nothing more to do. */ 2968 2969 // Testing RCU priority boosting requires rcutorture do 2970 // some serious abuse. Counter this by running ksoftirqd 2971 // at higher priority. 2972 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2973 struct sched_param sp; 2974 struct task_struct *t; 2975 2976 t = per_cpu(ksoftirqd, cpu); 2977 WARN_ON_ONCE(!t); 2978 sp.sched_priority = 2; 2979 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2980 #ifdef CONFIG_IRQ_FORCED_THREADING 2981 if (force_irqthreads()) { 2982 t = per_cpu(ktimerd, cpu); 2983 WARN_ON_ONCE(!t); 2984 sp.sched_priority = 2; 2985 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2986 } 2987 #endif 2988 } 2989 2990 /* Don't allow time recalculation while creating a new task. */ 2991 mutex_lock(&boost_mutex); 2992 rcu_torture_disable_rt_throttle(); 2993 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2994 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2995 cpu, "rcu_torture_boost_%u"); 2996 if (IS_ERR(boost_tasks[cpu])) { 2997 retval = PTR_ERR(boost_tasks[cpu]); 2998 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2999 n_rcu_torture_boost_ktrerror++; 3000 boost_tasks[cpu] = NULL; 3001 mutex_unlock(&boost_mutex); 3002 return retval; 3003 } 3004 mutex_unlock(&boost_mutex); 3005 return 0; 3006 } 3007 3008 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 3009 { 3010 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 3011 return NOTIFY_OK; 3012 } 3013 3014 static struct notifier_block rcu_torture_stall_block = { 3015 .notifier_call = rcu_torture_stall_nf, 3016 }; 3017 3018 /* 3019 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 3020 * induces a CPU stall for the time specified by stall_cpu. If a new 3021 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 3022 */ 3023 static void rcu_torture_stall_one(int rep, int irqsoff) 3024 { 3025 int idx; 3026 unsigned long stop_at; 3027 3028 if (stall_cpu_holdoff > 0) { 3029 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 3030 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 3031 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 3032 } 3033 if (!kthread_should_stop() && stall_gp_kthread > 0) { 3034 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 3035 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 3036 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 3037 if (kthread_should_stop()) 3038 break; 3039 schedule_timeout_uninterruptible(HZ); 3040 } 3041 } 3042 if (!kthread_should_stop() && stall_cpu > 0) { 3043 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 3044 stop_at = ktime_get_seconds() + stall_cpu; 3045 /* RCU CPU stall is expected behavior in following code. 
*/ 3046 idx = cur_ops->readlock(); 3047 if (irqsoff) 3048 local_irq_disable(); 3049 else if (!stall_cpu_block) 3050 preempt_disable(); 3051 pr_alert("%s start stall episode %d on CPU %d.\n", 3052 __func__, rep + 1, raw_smp_processor_id()); 3053 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && 3054 !kthread_should_stop()) 3055 if (stall_cpu_block) { 3056 #ifdef CONFIG_PREEMPTION 3057 preempt_schedule(); 3058 #else 3059 schedule_timeout_uninterruptible(HZ); 3060 #endif 3061 } else if (stall_no_softlockup) { 3062 touch_softlockup_watchdog(); 3063 } 3064 if (irqsoff) 3065 local_irq_enable(); 3066 else if (!stall_cpu_block) 3067 preempt_enable(); 3068 cur_ops->readunlock(idx); 3069 } 3070 } 3071 3072 /* 3073 * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many 3074 * additional times as specified by the stall_cpu_repeat module parameter. 3075 * Note that stall_cpu_irqsoff is ignored on the second and subsequent 3076 * stall. 3077 */ 3078 static int rcu_torture_stall(void *args) 3079 { 3080 int i; 3081 int repeat = stall_cpu_repeat; 3082 int ret; 3083 3084 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 3085 if (repeat < 0) { 3086 repeat = 0; 3087 WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)); 3088 } 3089 if (rcu_cpu_stall_notifiers) { 3090 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 3091 if (ret) 3092 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 3093 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 3094 } 3095 for (i = 0; i <= repeat; i++) { 3096 if (kthread_should_stop()) 3097 break; 3098 rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0); 3099 } 3100 pr_alert("%s end.\n", __func__); 3101 if (rcu_cpu_stall_notifiers && !ret) { 3102 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 3103 if (ret) 3104 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 3105 } 3106 torture_shutdown_absorb("rcu_torture_stall"); 3107 while (!kthread_should_stop()) 3108 schedule_timeout_interruptible(10 * HZ); 3109 return 0; 3110 } 3111 3112 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 3113 static int __init rcu_torture_stall_init(void) 3114 { 3115 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 3116 return 0; 3117 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 3118 } 3119 3120 /* State structure for forward-progress self-propagating RCU callback. */ 3121 struct fwd_cb_state { 3122 struct rcu_head rh; 3123 int stop; 3124 }; 3125 3126 /* 3127 * Forward-progress self-propagating RCU callback function. Because 3128 * callbacks run from softirq, this function is an implicit RCU read-side 3129 * critical section. 3130 */ 3131 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 3132 { 3133 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 3134 3135 if (READ_ONCE(fcsp->stop)) { 3136 WRITE_ONCE(fcsp->stop, 2); 3137 return; 3138 } 3139 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 3140 } 3141 3142 /* State for continuous-flood RCU callbacks. */ 3143 struct rcu_fwd_cb { 3144 struct rcu_head rh; 3145 struct rcu_fwd_cb *rfc_next; 3146 struct rcu_fwd *rfc_rfp; 3147 int rfc_gps; 3148 }; 3149 3150 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 3151 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 3152 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 3153 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 3154 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 3155 3156 struct rcu_launder_hist { 3157 long n_launders; 3158 unsigned long launder_gp_seq; 3159 }; 3160 3161 struct rcu_fwd { 3162 spinlock_t rcu_fwd_lock; 3163 struct rcu_fwd_cb *rcu_fwd_cb_head; 3164 struct rcu_fwd_cb **rcu_fwd_cb_tail; 3165 long n_launders_cb; 3166 unsigned long rcu_fwd_startat; 3167 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 3168 unsigned long rcu_launder_gp_seq_start; 3169 int rcu_fwd_id; 3170 }; 3171 3172 static DEFINE_MUTEX(rcu_fwd_mutex); 3173 static struct rcu_fwd *rcu_fwds; 3174 static unsigned long rcu_fwd_seq; 3175 static atomic_long_t rcu_fwd_max_cbs; 3176 static bool rcu_fwd_emergency_stop; 3177 3178 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 3179 { 3180 unsigned long gps; 3181 unsigned long gps_old; 3182 int i; 3183 int j; 3184 3185 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 3186 if (rfp->n_launders_hist[i].n_launders > 0) 3187 break; 3188 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 3189 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 3190 gps_old = rfp->rcu_launder_gp_seq_start; 3191 for (j = 0; j <= i; j++) { 3192 gps = rfp->n_launders_hist[j].launder_gp_seq; 3193 pr_cont(" %ds/%d: %ld:%ld", 3194 j + 1, FWD_CBS_HIST_DIV, 3195 rfp->n_launders_hist[j].n_launders, 3196 rcutorture_seq_diff(gps, gps_old)); 3197 gps_old = gps; 3198 } 3199 pr_cont("\n"); 3200 } 3201 3202 /* Callback function for continuous-flood RCU callbacks. */ 3203 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 3204 { 3205 unsigned long flags; 3206 int i; 3207 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 3208 struct rcu_fwd_cb **rfcpp; 3209 struct rcu_fwd *rfp = rfcp->rfc_rfp; 3210 3211 rfcp->rfc_next = NULL; 3212 rfcp->rfc_gps++; 3213 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 3214 rfcpp = rfp->rcu_fwd_cb_tail; 3215 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 3216 smp_store_release(rfcpp, rfcp); 3217 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 3218 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 3219 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 3220 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 3221 rfp->n_launders_hist[i].n_launders++; 3222 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 3223 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3224 } 3225 3226 // Give the scheduler a chance, even on nohz_full CPUs. 3227 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 3228 { 3229 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 3230 // Real call_rcu() floods hit userspace, so emulate that. 3231 if (need_resched() || (iter & 0xfff)) 3232 schedule(); 3233 return; 3234 } 3235 // No userspace emulation: CB invocation throttles call_rcu() 3236 cond_resched(); 3237 } 3238 3239 /* 3240 * Free all callbacks on the rcu_fwd_cb_head list, either because the 3241 * test is over or because we hit an OOM event. 
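 * Note that the list is drained one element per acquisition of
 * rcu_fwd_lock, which keeps each irqs-disabled critical section short,
 * and that on nohz_full kernels each freed element is followed by a
 * momentary RCU extended quiescent state.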
3242 */ 3243 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 3244 { 3245 unsigned long flags; 3246 unsigned long freed = 0; 3247 struct rcu_fwd_cb *rfcp; 3248 3249 for (;;) { 3250 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 3251 rfcp = rfp->rcu_fwd_cb_head; 3252 if (!rfcp) { 3253 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3254 break; 3255 } 3256 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 3257 if (!rfp->rcu_fwd_cb_head) 3258 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 3259 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3260 kfree(rfcp); 3261 freed++; 3262 rcu_torture_fwd_prog_cond_resched(freed); 3263 if (tick_nohz_full_enabled()) { 3264 local_irq_save(flags); 3265 rcu_momentary_eqs(); 3266 local_irq_restore(flags); 3267 } 3268 } 3269 return freed; 3270 } 3271 3272 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 3273 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 3274 int *tested, int *tested_tries) 3275 { 3276 unsigned long cver; 3277 unsigned long dur; 3278 struct fwd_cb_state fcs; 3279 unsigned long gps; 3280 int idx; 3281 int sd; 3282 int sd4; 3283 bool selfpropcb = false; 3284 unsigned long stopat; 3285 static DEFINE_TORTURE_RANDOM(trs); 3286 3287 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3288 if (!cur_ops->sync) 3289 return; // Cannot do need_resched() forward progress testing without ->sync. 3290 if (cur_ops->call && cur_ops->cb_barrier) { 3291 init_rcu_head_on_stack(&fcs.rh); 3292 selfpropcb = true; 3293 } 3294 3295 /* Tight loop containing cond_resched(). */ 3296 atomic_inc(&rcu_fwd_cb_nodelay); 3297 cur_ops->sync(); /* Later readers see above write. */ 3298 if (selfpropcb) { 3299 WRITE_ONCE(fcs.stop, 0); 3300 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 3301 } 3302 cver = READ_ONCE(rcu_torture_current_version); 3303 gps = cur_ops->get_gp_seq(); 3304 sd = cur_ops->stall_dur() + 1; 3305 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 3306 dur = sd4 + torture_random(&trs) % (sd - sd4); 3307 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 3308 stopat = rfp->rcu_fwd_startat + dur; 3309 while (time_before(jiffies, stopat) && 3310 !shutdown_time_arrived() && 3311 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3312 idx = cur_ops->readlock(); 3313 udelay(10); 3314 cur_ops->readunlock(idx); 3315 if (!fwd_progress_need_resched || need_resched()) 3316 cond_resched(); 3317 } 3318 (*tested_tries)++; 3319 if (!time_before(jiffies, stopat) && 3320 !shutdown_time_arrived() && 3321 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3322 (*tested)++; 3323 cver = READ_ONCE(rcu_torture_current_version) - cver; 3324 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 3325 WARN_ON(!cver && gps < 2); 3326 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 3327 rfp->rcu_fwd_id, dur, cver, gps); 3328 } 3329 if (selfpropcb) { 3330 WRITE_ONCE(fcs.stop, 1); 3331 cur_ops->sync(); /* Wait for running CB to complete. */ 3332 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 3333 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 3334 } 3335 3336 if (selfpropcb) { 3337 WARN_ON(READ_ONCE(fcs.stop) != 2); 3338 destroy_rcu_head_on_stack(&fcs.rh); 3339 } 3340 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 3341 atomic_dec(&rcu_fwd_cb_nodelay); 3342 } 3343 3344 /* Carry out call_rcu() forward-progress testing. 
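 * rcu_torture_fwd_cb_cr() puts each invoked callback back on the list
 * and increments its ->rfc_gps launder count, while the loop below
 * re-posts list elements via cur_ops->call(). Success is declared once
 * MIN_FWD_CBS_LAUNDERED re-posted callbacks have each passed through
 * at least MIN_FWD_CB_LAUNDERS grace periods.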
*/ 3345 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 3346 { 3347 unsigned long cver; 3348 unsigned long flags; 3349 unsigned long gps; 3350 int i; 3351 long n_launders; 3352 long n_launders_cb_snap; 3353 long n_launders_sa; 3354 long n_max_cbs; 3355 long n_max_gps; 3356 struct rcu_fwd_cb *rfcp; 3357 struct rcu_fwd_cb *rfcpn; 3358 unsigned long stopat; 3359 unsigned long stoppedat; 3360 3361 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3362 if (READ_ONCE(rcu_fwd_emergency_stop)) 3363 return; /* Get out of the way quickly, no GP wait! */ 3364 if (!cur_ops->call) 3365 return; /* Can't do call_rcu() fwd prog without ->call. */ 3366 3367 /* Loop continuously posting RCU callbacks. */ 3368 atomic_inc(&rcu_fwd_cb_nodelay); 3369 cur_ops->sync(); /* Later readers see above write. */ 3370 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 3371 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 3372 n_launders = 0; 3373 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 3374 n_launders_sa = 0; 3375 n_max_cbs = 0; 3376 n_max_gps = 0; 3377 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 3378 rfp->n_launders_hist[i].n_launders = 0; 3379 cver = READ_ONCE(rcu_torture_current_version); 3380 gps = cur_ops->get_gp_seq(); 3381 rfp->rcu_launder_gp_seq_start = gps; 3382 tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. 3383 while (time_before(jiffies, stopat) && 3384 !shutdown_time_arrived() && 3385 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3386 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 3387 rfcpn = NULL; 3388 if (rfcp) 3389 rfcpn = READ_ONCE(rfcp->rfc_next); 3390 if (rfcpn) { 3391 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 3392 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 3393 break; 3394 rfp->rcu_fwd_cb_head = rfcpn; 3395 n_launders++; 3396 n_launders_sa++; 3397 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 3398 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 3399 if (WARN_ON_ONCE(!rfcp)) { 3400 schedule_timeout_interruptible(1); 3401 continue; 3402 } 3403 n_max_cbs++; 3404 n_launders_sa = 0; 3405 rfcp->rfc_gps = 0; 3406 rfcp->rfc_rfp = rfp; 3407 } else { 3408 rfcp = NULL; 3409 } 3410 if (rfcp) 3411 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 3412 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 3413 if (tick_nohz_full_enabled()) { 3414 local_irq_save(flags); 3415 rcu_momentary_eqs(); 3416 local_irq_restore(flags); 3417 } 3418 } 3419 stoppedat = jiffies; 3420 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 3421 cver = READ_ONCE(rcu_torture_current_version) - cver; 3422 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 3423 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 3424 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. 
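 * This also ensures that no rcu_torture_fwd_cb_cr() instance can still
 * be queuing elements when the list is freed just below.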
*/ 3425 (void)rcu_torture_fwd_prog_cbfree(rfp); 3426 3427 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 3428 !shutdown_time_arrived()) { 3429 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) 3430 cur_ops->gp_kthread_dbg(); 3431 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n", 3432 __func__, 3433 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 3434 n_launders + n_max_cbs - n_launders_cb_snap, 3435 n_launders, n_launders_sa, 3436 n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); 3437 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 3438 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 3439 rcu_torture_fwd_cb_hist(rfp); 3440 mutex_unlock(&rcu_fwd_mutex); 3441 } 3442 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 3443 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 3444 atomic_dec(&rcu_fwd_cb_nodelay); 3445 } 3446 3447 3448 /* 3449 * OOM notifier, but this only prints diagnostic information for the 3450 * current forward-progress test. 3451 */ 3452 static int rcutorture_oom_notify(struct notifier_block *self, 3453 unsigned long notused, void *nfreed) 3454 { 3455 int i; 3456 long ncbs; 3457 struct rcu_fwd *rfp; 3458 3459 mutex_lock(&rcu_fwd_mutex); 3460 rfp = rcu_fwds; 3461 if (!rfp) { 3462 mutex_unlock(&rcu_fwd_mutex); 3463 return NOTIFY_OK; 3464 } 3465 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 3466 __func__); 3467 for (i = 0; i < fwd_progress; i++) { 3468 rcu_torture_fwd_cb_hist(&rfp[i]); 3469 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 3470 } 3471 WRITE_ONCE(rcu_fwd_emergency_stop, true); 3472 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 3473 ncbs = 0; 3474 for (i = 0; i < fwd_progress; i++) 3475 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3476 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3477 cur_ops->cb_barrier(); 3478 ncbs = 0; 3479 for (i = 0; i < fwd_progress; i++) 3480 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3481 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3482 cur_ops->cb_barrier(); 3483 ncbs = 0; 3484 for (i = 0; i < fwd_progress; i++) 3485 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3486 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3487 smp_mb(); /* Frees before return to avoid redoing OOM. */ 3488 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 3489 pr_info("%s returning after OOM processing.\n", __func__); 3490 mutex_unlock(&rcu_fwd_mutex); 3491 return NOTIFY_OK; 3492 } 3493 3494 static struct notifier_block rcutorture_oom_nb = { 3495 .notifier_call = rcutorture_oom_notify 3496 }; 3497 3498 /* Carry out grace-period forward-progress testing. 
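 * Kthread 0 acts as leader, handling the holdoff, clearing the
 * emergency-stop flag, reporting the per-cycle maximum callback count,
 * and advancing rcu_fwd_seq, while the other kthreads wait for
 * rcu_fwd_seq to advance before starting each test pass.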
*/ 3499 static int rcu_torture_fwd_prog(void *args) 3500 { 3501 bool firsttime = true; 3502 long max_cbs; 3503 int oldnice = task_nice(current); 3504 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 3505 struct rcu_fwd *rfp = args; 3506 int tested = 0; 3507 int tested_tries = 0; 3508 3509 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 3510 while (!rcu_inkernel_boot_has_ended()) 3511 schedule_timeout_interruptible(HZ / 10); 3512 rcu_bind_current_to_nocb(); 3513 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 3514 set_user_nice(current, MAX_NICE); 3515 do { 3516 if (!rfp->rcu_fwd_id) { 3517 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 3518 WRITE_ONCE(rcu_fwd_emergency_stop, false); 3519 if (!firsttime) { 3520 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 3521 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 3522 } 3523 firsttime = false; 3524 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 3525 } else { 3526 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 3527 schedule_timeout_interruptible(HZ / 20); 3528 oldseq = READ_ONCE(rcu_fwd_seq); 3529 } 3530 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3531 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 3532 rcu_torture_fwd_prog_cr(rfp); 3533 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 3534 (!IS_ENABLED(CONFIG_TINY_RCU) || 3535 (rcu_inkernel_boot_has_ended() && 3536 torture_num_online_cpus() > rfp->rcu_fwd_id))) 3537 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 3538 3539 /* Avoid slow periods, better to test when busy. */ 3540 if (stutter_wait("rcu_torture_fwd_prog")) 3541 sched_set_normal(current, oldnice); 3542 } while (!torture_must_stop()); 3543 /* Short runs might not contain a valid forward-progress attempt. */ 3544 if (!rfp->rcu_fwd_id) { 3545 WARN_ON(!tested && tested_tries >= 5); 3546 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 3547 } 3548 torture_kthread_stopping("rcu_torture_fwd_prog"); 3549 return 0; 3550 } 3551 3552 /* If forward-progress checking is requested and feasible, spawn the thread. */ 3553 static int __init rcu_torture_fwd_prog_init(void) 3554 { 3555 int i; 3556 int ret = 0; 3557 struct rcu_fwd *rfp; 3558 3559 if (!fwd_progress) 3560 return 0; /* Not requested, so don't do it. */ 3561 if (fwd_progress >= nr_cpu_ids) { 3562 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 3563 fwd_progress = nr_cpu_ids; 3564 } else if (fwd_progress < 0) { 3565 fwd_progress = nr_cpu_ids; 3566 } 3567 if ((!cur_ops->sync && !cur_ops->call) || 3568 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 3569 cur_ops == &rcu_busted_ops) { 3570 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 3571 fwd_progress = 0; 3572 return 0; 3573 } 3574 if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3575 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing"); 3576 fwd_progress = 0; 3577 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 3578 return -EINVAL; /* In module, can fail back to user. */ 3579 WARN_ON(1); /* Make sure rcutorture scripting notices conflict. 
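 * A built-in rcutorture cannot fail the module load the way a module
 * can, so the next-best option is a warning splat that the scripting
 * will pick out of the console log.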
*/ 3580 return 0; 3581 } 3582 if (fwd_progress_holdoff <= 0) 3583 fwd_progress_holdoff = 1; 3584 if (fwd_progress_div <= 0) 3585 fwd_progress_div = 4; 3586 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 3587 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 3588 if (!rfp || !fwd_prog_tasks) { 3589 kfree(rfp); 3590 kfree(fwd_prog_tasks); 3591 fwd_prog_tasks = NULL; 3592 fwd_progress = 0; 3593 return -ENOMEM; 3594 } 3595 for (i = 0; i < fwd_progress; i++) { 3596 spin_lock_init(&rfp[i].rcu_fwd_lock); 3597 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3598 rfp[i].rcu_fwd_id = i; 3599 } 3600 mutex_lock(&rcu_fwd_mutex); 3601 rcu_fwds = rfp; 3602 mutex_unlock(&rcu_fwd_mutex); 3603 register_oom_notifier(&rcutorture_oom_nb); 3604 for (i = 0; i < fwd_progress; i++) { 3605 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3606 if (ret) { 3607 fwd_progress = i; 3608 return ret; 3609 } 3610 } 3611 return 0; 3612 } 3613 3614 static void rcu_torture_fwd_prog_cleanup(void) 3615 { 3616 int i; 3617 struct rcu_fwd *rfp; 3618 3619 if (!rcu_fwds || !fwd_prog_tasks) 3620 return; 3621 for (i = 0; i < fwd_progress; i++) 3622 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3623 unregister_oom_notifier(&rcutorture_oom_nb); 3624 mutex_lock(&rcu_fwd_mutex); 3625 rfp = rcu_fwds; 3626 rcu_fwds = NULL; 3627 mutex_unlock(&rcu_fwd_mutex); 3628 kfree(rfp); 3629 kfree(fwd_prog_tasks); 3630 fwd_prog_tasks = NULL; 3631 } 3632 3633 /* Callback function for RCU barrier testing. */ 3634 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3635 { 3636 atomic_inc(&barrier_cbs_invoked); 3637 } 3638 3639 /* IPI handler to get callback posted on desired CPU, if online. */ 3640 static int rcu_torture_barrier1cb(void *rcu_void) 3641 { 3642 struct rcu_head *rhp = rcu_void; 3643 3644 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3645 return 0; 3646 } 3647 3648 /* kthread function to register callbacks used to test RCU barriers. */ 3649 static int rcu_torture_barrier_cbs(void *arg) 3650 { 3651 long myid = (long)arg; 3652 bool lastphase = false; 3653 bool newphase; 3654 struct rcu_head rcu; 3655 3656 init_rcu_head_on_stack(&rcu); 3657 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3658 set_user_nice(current, MAX_NICE); 3659 do { 3660 wait_event(barrier_cbs_wq[myid], 3661 (newphase = 3662 smp_load_acquire(&barrier_phase)) != lastphase || 3663 torture_must_stop()); 3664 lastphase = newphase; 3665 if (torture_must_stop()) 3666 break; 3667 /* 3668 * The above smp_load_acquire() ensures barrier_phase load 3669 * is ordered before the following ->call(). 3670 */ 3671 if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1)) 3672 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3673 3674 if (atomic_dec_and_test(&barrier_cbs_count)) 3675 wake_up(&barrier_wq); 3676 } while (!torture_must_stop()); 3677 if (cur_ops->cb_barrier != NULL) 3678 cur_ops->cb_barrier(); 3679 destroy_rcu_head_on_stack(&rcu); 3680 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3681 return 0; 3682 } 3683 3684 /* kthread function to drive and coordinate RCU barrier testing. */ 3685 static int rcu_torture_barrier(void *arg) 3686 { 3687 int i; 3688 3689 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3690 do { 3691 atomic_set(&barrier_cbs_invoked, 0); 3692 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3693 /* Ensure barrier_phase ordered after prior assignments. 
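 * This smp_store_release() pairs with the smp_load_acquire() in
 * rcu_torture_barrier_cbs(), guaranteeing that each callback kthread
 * sees the zeroed barrier_cbs_invoked and barrier_cbs_count counters
 * before it observes the new phase.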
*/ 3694 smp_store_release(&barrier_phase, !barrier_phase); 3695 for (i = 0; i < n_barrier_cbs; i++) 3696 wake_up(&barrier_cbs_wq[i]); 3697 wait_event(barrier_wq, 3698 atomic_read(&barrier_cbs_count) == 0 || 3699 torture_must_stop()); 3700 if (torture_must_stop()) 3701 break; 3702 n_barrier_attempts++; 3703 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3704 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3705 n_rcu_torture_barrier_error++; 3706 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3707 atomic_read(&barrier_cbs_invoked), 3708 n_barrier_cbs); 3709 WARN_ON(1); 3710 // Wait manually for the remaining callbacks 3711 i = 0; 3712 do { 3713 if (WARN_ON(i++ > HZ)) 3714 i = INT_MIN; 3715 schedule_timeout_interruptible(1); 3716 cur_ops->cb_barrier(); 3717 } while (atomic_read(&barrier_cbs_invoked) != 3718 n_barrier_cbs && 3719 !torture_must_stop()); 3720 smp_mb(); // Can't trust ordering if broken. 3721 if (!torture_must_stop()) 3722 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3723 atomic_read(&barrier_cbs_invoked)); 3724 } else { 3725 n_barrier_successes++; 3726 } 3727 schedule_timeout_interruptible(HZ / 10); 3728 } while (!torture_must_stop()); 3729 torture_kthread_stopping("rcu_torture_barrier"); 3730 return 0; 3731 } 3732 3733 /* Initialize RCU barrier testing. */ 3734 static int rcu_torture_barrier_init(void) 3735 { 3736 int i; 3737 int ret; 3738 3739 if (n_barrier_cbs <= 0) 3740 return 0; 3741 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3742 pr_alert("%s" TORTURE_FLAG 3743 " Call or barrier ops missing for %s,\n", 3744 torture_type, cur_ops->name); 3745 pr_alert("%s" TORTURE_FLAG 3746 " RCU barrier testing omitted from run.\n", 3747 torture_type); 3748 return 0; 3749 } 3750 atomic_set(&barrier_cbs_count, 0); 3751 atomic_set(&barrier_cbs_invoked, 0); 3752 barrier_cbs_tasks = 3753 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3754 GFP_KERNEL); 3755 barrier_cbs_wq = 3756 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3757 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3758 return -ENOMEM; 3759 for (i = 0; i < n_barrier_cbs; i++) { 3760 init_waitqueue_head(&barrier_cbs_wq[i]); 3761 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3762 (void *)(long)i, 3763 barrier_cbs_tasks[i]); 3764 if (ret) 3765 return ret; 3766 } 3767 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3768 } 3769 3770 /* Clean up after RCU barrier testing. */ 3771 static void rcu_torture_barrier_cleanup(void) 3772 { 3773 int i; 3774 3775 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3776 if (barrier_cbs_tasks != NULL) { 3777 for (i = 0; i < n_barrier_cbs; i++) 3778 torture_stop_kthread(rcu_torture_barrier_cbs, 3779 barrier_cbs_tasks[i]); 3780 kfree(barrier_cbs_tasks); 3781 barrier_cbs_tasks = NULL; 3782 } 3783 if (barrier_cbs_wq != NULL) { 3784 kfree(barrier_cbs_wq); 3785 barrier_cbs_wq = NULL; 3786 } 3787 } 3788 3789 static bool rcu_torture_can_boost(void) 3790 { 3791 static int boost_warn_once; 3792 int prio; 3793 3794 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3795 return false; 3796 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3797 return false; 3798 3799 prio = rcu_get_gp_kthreads_prio(); 3800 if (!prio) 3801 return false; 3802 3803 if (prio < 2) { 3804 if (boost_warn_once == 1) 3805 return false; 3806 3807 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3808 boost_warn_once = 1; 3809 return false; 3810 } 3811 3812 return true; 3813 } 3814 3815 static bool read_exit_child_stop; 3816 static bool read_exit_child_stopped; 3817 static wait_queue_head_t read_exit_wq; 3818 3819 // Child kthread which just does an rcutorture reader and exits. 3820 static int rcu_torture_read_exit_child(void *trsp_in) 3821 { 3822 struct torture_random_state *trsp = trsp_in; 3823 3824 set_user_nice(current, MAX_NICE); 3825 // Minimize time between reading and exiting. 3826 while (!kthread_should_stop()) 3827 schedule_timeout_uninterruptible(HZ / 20); 3828 (void)rcu_torture_one_read(trsp, -1); 3829 return 0; 3830 } 3831 3832 // Parent kthread which creates and destroys read-exit child kthreads. 3833 static int rcu_torture_read_exit(void *unused) 3834 { 3835 bool errexit = false; 3836 int i; 3837 struct task_struct *tsp; 3838 DEFINE_TORTURE_RANDOM(trs); 3839 3840 // Allocate and initialize. 3841 set_user_nice(current, MAX_NICE); 3842 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3843 3844 // Each pass through this loop does one read-exit episode. 3845 do { 3846 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3847 for (i = 0; i < read_exit_burst; i++) { 3848 if (READ_ONCE(read_exit_child_stop)) 3849 break; 3850 stutter_wait("rcu_torture_read_exit"); 3851 // Spawn child. 3852 tsp = kthread_run(rcu_torture_read_exit_child, 3853 &trs, "%s", "rcu_torture_read_exit_child"); 3854 if (IS_ERR(tsp)) { 3855 TOROUT_ERRSTRING("out of memory"); 3856 errexit = true; 3857 break; 3858 } 3859 cond_resched(); 3860 kthread_stop(tsp); 3861 n_read_exits++; 3862 } 3863 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3864 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3865 i = 0; 3866 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3867 schedule_timeout_uninterruptible(HZ); 3868 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3869 3870 // Clean up and exit. 3871 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3872 smp_mb(); // Store before wakeup. 3873 wake_up(&read_exit_wq); 3874 while (!torture_must_stop()) 3875 schedule_timeout_uninterruptible(HZ / 20); 3876 torture_kthread_stopping("rcu_torture_read_exit"); 3877 return 0; 3878 } 3879 3880 static int rcu_torture_read_exit_init(void) 3881 { 3882 if (read_exit_burst <= 0) 3883 return 0; 3884 init_waitqueue_head(&read_exit_wq); 3885 read_exit_child_stop = false; 3886 read_exit_child_stopped = false; 3887 return torture_create_kthread(rcu_torture_read_exit, NULL, 3888 read_exit_task); 3889 } 3890 3891 static void rcu_torture_read_exit_cleanup(void) 3892 { 3893 if (!read_exit_task) 3894 return; 3895 WRITE_ONCE(read_exit_child_stop, true); 3896 smp_mb(); // Above write before wait. 
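// The wait_event() below pairs its smp_load_acquire() with the
// smp_store_release(&read_exit_child_stopped, true) executed by
// rcu_torture_read_exit() once its last child has been reaped.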
3897 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 3898 torture_stop_kthread(rcu_torture_read_exit, read_exit_task); 3899 } 3900 3901 static void rcutorture_test_nmis(int n) 3902 { 3903 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3904 int cpu; 3905 int dumpcpu; 3906 int i; 3907 3908 for (i = 0; i < n; i++) { 3909 preempt_disable(); 3910 cpu = smp_processor_id(); 3911 dumpcpu = cpu + 1; 3912 if (dumpcpu >= nr_cpu_ids) 3913 dumpcpu = 0; 3914 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu); 3915 dump_cpu_task(dumpcpu); 3916 preempt_enable(); 3917 schedule_timeout_uninterruptible(15 * HZ); 3918 } 3919 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3920 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis); 3921 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3922 } 3923 3924 // Randomly preempt online CPUs. 3925 static int rcu_torture_preempt(void *unused) 3926 { 3927 int cpu = -1; 3928 DEFINE_TORTURE_RANDOM(rand); 3929 3930 schedule_timeout_idle(stall_cpu_holdoff); 3931 do { 3932 // Wait for preempt_interval ms with up to 100us fuzz. 3933 torture_hrtimeout_ms(preempt_interval, 100, &rand); 3934 // Select online CPU. 3935 cpu = cpumask_next(cpu, cpu_online_mask); 3936 if (cpu >= nr_cpu_ids) 3937 cpu = cpumask_next(-1, cpu_online_mask); 3938 WARN_ON_ONCE(cpu >= nr_cpu_ids); 3939 // Move to that CPU; if we can't do so, retry later. 3940 if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false)) 3941 continue; 3942 // Preempt at high-ish priority, then reset to normal. 3943 sched_set_fifo(current); 3944 torture_sched_setaffinity(current->pid, cpu_present_mask, true); 3945 mdelay(preempt_duration); 3946 sched_set_normal(current, 0); 3947 stutter_wait("rcu_torture_preempt"); 3948 } while (!torture_must_stop()); 3949 torture_kthread_stopping("rcu_torture_preempt"); 3950 return 0; 3951 } 3952 3953 static enum cpuhp_state rcutor_hp; 3954 3955 static struct hrtimer gpwrap_lag_timer; 3956 static bool gpwrap_lag_active; 3957 3958 /* Timer handler for toggling the RCU grace-period sequence overflow test lag value. */ 3959 static enum hrtimer_restart rcu_gpwrap_lag_timer(struct hrtimer *timer) 3960 { 3961 ktime_t next_delay; 3962 3963 if (gpwrap_lag_active) { 3964 pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n"); 3965 cur_ops->set_gpwrap_lag(0); 3966 gpwrap_lag_active = false; 3967 next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0); 3968 } else { 3969 pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n", gpwrap_lag_gps); 3970 cur_ops->set_gpwrap_lag(gpwrap_lag_gps); 3971 gpwrap_lag_active = true; 3972 next_delay = ktime_set(gpwrap_lag_active_mins * 60, 0); 3973 } 3974 3975 if (torture_must_stop_irq()) 3976 return HRTIMER_NORESTART; 3977 3978 hrtimer_forward_now(timer, next_delay); 3979 return HRTIMER_RESTART; 3980 } 3981 3982 static int rcu_gpwrap_lag_init(void) 3983 { 3984 if (!gpwrap_lag) 3985 return 0; 3986 3987 if (gpwrap_lag_cycle_mins <= 0 || gpwrap_lag_active_mins <= 0) { 3988 pr_alert("rcu-torture: lag timing parameters must be positive\n"); 3989 return -EINVAL; 3990 } 3991 3992 hrtimer_setup(&gpwrap_lag_timer, rcu_gpwrap_lag_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3993 gpwrap_lag_active = false; 3994 hrtimer_start(&gpwrap_lag_timer, 3995 ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL); 3996 3997 return 0; 3998 } 3999 4000 static void rcu_gpwrap_lag_cleanup(void) 4001 { 4002
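/* hrtimer_cancel() waits for any concurrently executing
 * rcu_gpwrap_lag_timer() instance to finish, after which the lag value
 * can be reset without the timer being re-armed behind our back. */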
hrtimer_cancel(&gpwrap_lag_timer); 4003 cur_ops->set_gpwrap_lag(0); 4004 gpwrap_lag_active = false; 4005 } 4006 static void 4007 rcu_torture_cleanup(void) 4008 { 4009 int firsttime; 4010 int flags = 0; 4011 unsigned long gp_seq = 0; 4012 int i; 4013 int j; 4014 4015 if (torture_cleanup_begin()) { 4016 if (cur_ops->cb_barrier != NULL) { 4017 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 4018 cur_ops->cb_barrier(); 4019 } 4020 if (cur_ops->gp_slow_unregister) 4021 cur_ops->gp_slow_unregister(NULL); 4022 return; 4023 } 4024 if (!cur_ops) { 4025 torture_cleanup_end(); 4026 return; 4027 } 4028 4029 rcutorture_test_nmis(test_nmis); 4030 4031 if (cur_ops->gp_kthread_dbg) 4032 cur_ops->gp_kthread_dbg(); 4033 torture_stop_kthread(rcu_torture_preempt, preempt_task); 4034 rcu_torture_read_exit_cleanup(); 4035 rcu_torture_barrier_cleanup(); 4036 rcu_torture_fwd_prog_cleanup(); 4037 torture_stop_kthread(rcu_torture_stall, stall_task); 4038 torture_stop_kthread(rcu_torture_writer, writer_task); 4039 4040 if (nocb_tasks) { 4041 for (i = 0; i < nrealnocbers; i++) 4042 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 4043 kfree(nocb_tasks); 4044 nocb_tasks = NULL; 4045 } 4046 4047 if (updown_task) { 4048 torture_stop_kthread(rcu_torture_updown, updown_task); 4049 updown_task = NULL; 4050 } 4051 if (reader_tasks) { 4052 for (i = 0; i < nrealreaders; i++) 4053 torture_stop_kthread(rcu_torture_reader, 4054 reader_tasks[i]); 4055 kfree(reader_tasks); 4056 reader_tasks = NULL; 4057 } 4058 kfree(rcu_torture_reader_mbchk); 4059 rcu_torture_reader_mbchk = NULL; 4060 4061 if (fakewriter_tasks) { 4062 for (i = 0; i < nrealfakewriters; i++) 4063 torture_stop_kthread(rcu_torture_fakewriter, 4064 fakewriter_tasks[i]); 4065 kfree(fakewriter_tasks); 4066 fakewriter_tasks = NULL; 4067 } 4068 4069 if (cur_ops->get_gp_data) 4070 cur_ops->get_gp_data(&flags, &gp_seq); 4071 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 4072 cur_ops->name, (long)gp_seq, flags, 4073 rcutorture_seq_diff(gp_seq, start_gp_seq)); 4074 torture_stop_kthread(rcu_torture_stats, stats_task); 4075 torture_stop_kthread(rcu_torture_fqs, fqs_task); 4076 if (rcu_torture_can_boost() && rcutor_hp >= 0) 4077 cpuhp_remove_state(rcutor_hp); 4078 4079 /* 4080 * Wait for all RCU callbacks to fire, then do torture-type-specific 4081 * cleanup operations. 4082 */ 4083 if (cur_ops->cb_barrier != NULL) { 4084 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 4085 cur_ops->cb_barrier(); 4086 } 4087 if (cur_ops->cleanup != NULL) 4088 cur_ops->cleanup(); 4089 4090 rcu_torture_mem_dump_obj(); 4091 4092 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 4093 4094 if (err_segs_recorded) { 4095 pr_alert("Failure/close-call rcutorture reader segments:\n"); 4096 if (rt_read_nsegs == 0) 4097 pr_alert("\t: No segments recorded!!!\n"); 4098 firsttime = 1; 4099 for (i = 0; i < rt_read_nsegs; i++) { 4100 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP)) 4101 pr_alert("\t%lluus ", div64_u64(err_segs[i].rt_ts, 1000ULL)); 4102 else 4103 pr_alert("\t"); 4104 pr_cont("%d: %#4x", i, err_segs[i].rt_readstate); 4105 if (err_segs[i].rt_delay_jiffies != 0) { 4106 pr_cont("%s%ldjiffies", firsttime ? 
"" : "+", 4107 err_segs[i].rt_delay_jiffies); 4108 firsttime = 0; 4109 } 4110 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 4111 pr_cont(" CPU %2d", err_segs[i].rt_cpu); 4112 if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu) 4113 pr_cont("->%-2d", err_segs[i].rt_end_cpu); 4114 else 4115 pr_cont(" ..."); 4116 } 4117 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && 4118 cur_ops->gather_gp_seqs && cur_ops->format_gp_seqs) { 4119 char buf1[20+1]; 4120 char buf2[20+1]; 4121 char sepchar = '-'; 4122 4123 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq, 4124 buf1, ARRAY_SIZE(buf1)); 4125 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq_end, 4126 buf2, ARRAY_SIZE(buf2)); 4127 if (err_segs[i].rt_gp_seq == err_segs[i].rt_gp_seq_end) { 4128 if (buf2[0]) { 4129 for (j = 0; buf2[j]; j++) 4130 buf2[j] = '.'; 4131 if (j) 4132 buf2[j - 1] = ' '; 4133 } 4134 sepchar = ' '; 4135 } 4136 pr_cont(" %s%c%s", buf1, sepchar, buf2); 4137 } 4138 if (err_segs[i].rt_delay_ms != 0) { 4139 pr_cont(" %s%ldms", firsttime ? "" : "+", 4140 err_segs[i].rt_delay_ms); 4141 firsttime = 0; 4142 } 4143 if (err_segs[i].rt_delay_us != 0) { 4144 pr_cont(" %s%ldus", firsttime ? "" : "+", 4145 err_segs[i].rt_delay_us); 4146 firsttime = 0; 4147 } 4148 pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : ""); 4149 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH) 4150 pr_cont(" BH"); 4151 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ) 4152 pr_cont(" IRQ"); 4153 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT) 4154 pr_cont(" PREEMPT"); 4155 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH) 4156 pr_cont(" RBH"); 4157 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED) 4158 pr_cont(" SCHED"); 4159 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1) 4160 pr_cont(" RCU_1"); 4161 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2) 4162 pr_cont(" RCU_2"); 4163 pr_cont("\n"); 4164 4165 } 4166 if (rt_read_preempted) 4167 pr_alert("\tReader was preempted.\n"); 4168 } 4169 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 4170 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 4171 else if (torture_onoff_failures()) 4172 rcu_torture_print_module_parms(cur_ops, 4173 "End of test: RCU_HOTPLUG"); 4174 else 4175 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 4176 torture_cleanup_end(); 4177 if (cur_ops->gp_slow_unregister) 4178 cur_ops->gp_slow_unregister(NULL); 4179 4180 if (gpwrap_lag && cur_ops->set_gpwrap_lag) 4181 rcu_gpwrap_lag_cleanup(); 4182 } 4183 4184 static void rcu_torture_leak_cb(struct rcu_head *rhp) 4185 { 4186 } 4187 4188 static void rcu_torture_err_cb(struct rcu_head *rhp) 4189 { 4190 /* 4191 * This -might- happen due to race conditions, but is unlikely. 4192 * The scenario that leads to this happening is that the 4193 * first of the pair of duplicate callbacks is queued, 4194 * someone else starts a grace period that includes that 4195 * callback, then the second of the pair must wait for the 4196 * next grace period. Unlikely, but can happen. If it 4197 * does happen, the debug-objects subsystem won't have splatted. 4198 */ 4199 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 4200 } 4201 4202 /* 4203 * Verify that double-free causes debug-objects to complain, but only 4204 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 4205 * cannot be carried out. 
4206 */ 4207 static void rcu_test_debug_objects(void) 4208 { 4209 struct rcu_head rh1; 4210 struct rcu_head rh2; 4211 int idx; 4212 4213 if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) { 4214 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n", 4215 KBUILD_MODNAME, cur_ops->name); 4216 return; 4217 } 4218 4219 if (WARN_ON_ONCE(cur_ops->debug_objects && 4220 (!cur_ops->call || !cur_ops->cb_barrier))) 4221 return; 4222 4223 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 4224 4225 init_rcu_head_on_stack(&rh1); 4226 init_rcu_head_on_stack(&rh2); 4227 pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name); 4228 4229 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 4230 idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */ 4231 cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 4232 cur_ops->call(&rh2, rcu_torture_leak_cb); 4233 cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 4234 if (rhp) { 4235 cur_ops->call(rhp, rcu_torture_leak_cb); 4236 cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 4237 } 4238 cur_ops->readunlock(idx); 4239 4240 /* Wait for them all to get done so we can safely return. */ 4241 cur_ops->cb_barrier(); 4242 pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name); 4243 destroy_rcu_head_on_stack(&rh1); 4244 destroy_rcu_head_on_stack(&rh2); 4245 kfree(rhp); 4246 } 4247 4248 static void rcutorture_sync(void) 4249 { 4250 static unsigned long n; 4251 4252 if (cur_ops->sync && !(++n & 0xfff)) 4253 cur_ops->sync(); 4254 } 4255 4256 static DEFINE_MUTEX(mut0); 4257 static DEFINE_MUTEX(mut1); 4258 static DEFINE_MUTEX(mut2); 4259 static DEFINE_MUTEX(mut3); 4260 static DEFINE_MUTEX(mut4); 4261 static DEFINE_MUTEX(mut5); 4262 static DEFINE_MUTEX(mut6); 4263 static DEFINE_MUTEX(mut7); 4264 static DEFINE_MUTEX(mut8); 4265 static DEFINE_MUTEX(mut9); 4266 4267 static DECLARE_RWSEM(rwsem0); 4268 static DECLARE_RWSEM(rwsem1); 4269 static DECLARE_RWSEM(rwsem2); 4270 static DECLARE_RWSEM(rwsem3); 4271 static DECLARE_RWSEM(rwsem4); 4272 static DECLARE_RWSEM(rwsem5); 4273 static DECLARE_RWSEM(rwsem6); 4274 static DECLARE_RWSEM(rwsem7); 4275 static DECLARE_RWSEM(rwsem8); 4276 static DECLARE_RWSEM(rwsem9); 4277 4278 DEFINE_STATIC_SRCU(srcu0); 4279 DEFINE_STATIC_SRCU(srcu1); 4280 DEFINE_STATIC_SRCU(srcu2); 4281 DEFINE_STATIC_SRCU(srcu3); 4282 DEFINE_STATIC_SRCU(srcu4); 4283 DEFINE_STATIC_SRCU(srcu5); 4284 DEFINE_STATIC_SRCU(srcu6); 4285 DEFINE_STATIC_SRCU(srcu7); 4286 DEFINE_STATIC_SRCU(srcu8); 4287 DEFINE_STATIC_SRCU(srcu9); 4288 4289 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, 4290 int cyclelen, int deadlock) 4291 { 4292 int j = i + 1; 4293 4294 if (j >= cyclelen) 4295 j = deadlock ? 0 : -1; 4296 if (j >= 0) 4297 pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i); 4298 else 4299 pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i); 4300 return j; 4301 } 4302 4303 // Test lockdep on SRCU-based deadlock scenarios. 
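// The test_srcu_lockdep module parameter is decoded below as DNNL:
// D enables deadlock cycles when nonzero, NN selects the lock types
// under test, and L is the cycle length. For example (illustrative
// value only), test_srcu_lockdep=1003 requests a three-way deadlock
// cycle built entirely from SRCU readers and synchronize_srcu().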
4304 static void rcu_torture_init_srcu_lockdep(void) 4305 { 4306 int cyclelen; 4307 int deadlock; 4308 bool err = false; 4309 int i; 4310 int j; 4311 int idx; 4312 struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, 4313 &mut5, &mut6, &mut7, &mut8, &mut9 }; 4314 struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, 4315 &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; 4316 struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, 4317 &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; 4318 int testtype; 4319 4320 if (!test_srcu_lockdep) 4321 return; 4322 4323 deadlock = test_srcu_lockdep / 1000; 4324 testtype = (test_srcu_lockdep / 10) % 100; 4325 cyclelen = test_srcu_lockdep % 10; 4326 WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); 4327 if (WARN_ONCE(deadlock != !!deadlock, 4328 "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n", 4329 __func__, test_srcu_lockdep, deadlock)) 4330 err = true; 4331 if (WARN_ONCE(cyclelen <= 0, 4332 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", 4333 __func__, test_srcu_lockdep, cyclelen)) 4334 err = true; 4335 if (err) 4336 goto err_out; 4337 4338 if (testtype == 0) { 4339 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", 4340 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4341 if (deadlock && cyclelen == 1) 4342 pr_info("%s: Expect hang.\n", __func__); 4343 for (i = 0; i < cyclelen; i++) { 4344 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 4345 "srcu_read_unlock", i, cyclelen, deadlock); 4346 idx = srcu_read_lock(srcus[i]); 4347 if (j >= 0) 4348 synchronize_srcu(srcus[j]); 4349 srcu_read_unlock(srcus[i], idx); 4350 } 4351 return; 4352 } 4353 4354 if (testtype == 1) { 4355 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 4356 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4357 for (i = 0; i < cyclelen; i++) { 4358 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 4359 __func__, i, i, i, i); 4360 idx = srcu_read_lock(srcus[i]); 4361 mutex_lock(muts[i]); 4362 mutex_unlock(muts[i]); 4363 srcu_read_unlock(srcus[i], idx); 4364 4365 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 4366 "mutex_unlock", i, cyclelen, deadlock); 4367 mutex_lock(muts[i]); 4368 if (j >= 0) 4369 synchronize_srcu(srcus[j]); 4370 mutex_unlock(muts[i]); 4371 } 4372 return; 4373 } 4374 4375 if (testtype == 2) { 4376 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 4377 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4378 for (i = 0; i < cyclelen; i++) { 4379 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 4380 __func__, i, i, i, i); 4381 idx = srcu_read_lock(srcus[i]); 4382 down_read(rwsems[i]); 4383 up_read(rwsems[i]); 4384 srcu_read_unlock(srcus[i], idx); 4385 4386 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 4387 "up_write", i, cyclelen, deadlock); 4388 down_write(rwsems[i]); 4389 if (j >= 0) 4390 synchronize_srcu(srcus[j]); 4391 up_write(rwsems[i]); 4392 } 4393 return; 4394 } 4395 4396 #ifdef CONFIG_TASKS_TRACE_RCU 4397 if (testtype == 3) { 4398 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 4399 __func__, test_srcu_lockdep, cyclelen, deadlock ? 
"" : "non-"); 4400 if (deadlock && cyclelen == 1) 4401 pr_info("%s: Expect hang.\n", __func__); 4402 for (i = 0; i < cyclelen; i++) { 4403 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 4404 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 4405 : "synchronize_srcu"; 4406 char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock"; 4407 4408 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 4409 if (i == 0) 4410 rcu_read_lock_trace(); 4411 else 4412 idx = srcu_read_lock(srcus[i]); 4413 if (j >= 0) { 4414 if (i == cyclelen - 1) 4415 synchronize_rcu_tasks_trace(); 4416 else 4417 synchronize_srcu(srcus[j]); 4418 } 4419 if (i == 0) 4420 rcu_read_unlock_trace(); 4421 else 4422 srcu_read_unlock(srcus[i], idx); 4423 } 4424 return; 4425 } 4426 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 4427 4428 err_out: 4429 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 4430 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 4431 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 4432 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 4433 pr_info("%s: L: Cycle length.\n", __func__); 4434 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 4435 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 4436 } 4437 4438 static int __init 4439 rcu_torture_init(void) 4440 { 4441 long i; 4442 int cpu; 4443 int firsterr = 0; 4444 int flags = 0; 4445 unsigned long gp_seq = 0; 4446 static struct rcu_torture_ops *torture_ops[] = { 4447 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 4448 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 4449 &trivial_ops, 4450 }; 4451 4452 if (!torture_init_begin(torture_type, verbose)) 4453 return -EBUSY; 4454 4455 /* Process args and tell the world that the torturer is on the job. 
*/ 4456 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 4457 cur_ops = torture_ops[i]; 4458 if (strcmp(torture_type, cur_ops->name) == 0) 4459 break; 4460 } 4461 if (i == ARRAY_SIZE(torture_ops)) { 4462 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 4463 torture_type); 4464 pr_alert("rcu-torture types:"); 4465 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 4466 pr_cont(" %s", torture_ops[i]->name); 4467 pr_cont("\n"); 4468 firsterr = -EINVAL; 4469 cur_ops = NULL; 4470 goto unwind; 4471 } 4472 if (cur_ops->fqs == NULL && fqs_duration != 0) { 4473 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 4474 fqs_duration = 0; 4475 } 4476 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 4477 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 4478 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 4479 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 4480 nocbs_nthreads = 0; 4481 } 4482 if (cur_ops->init) 4483 cur_ops->init(); 4484 4485 rcu_torture_init_srcu_lockdep(); 4486 4487 if (nfakewriters >= 0) { 4488 nrealfakewriters = nfakewriters; 4489 } else { 4490 nrealfakewriters = num_online_cpus() - 2 - nfakewriters; 4491 if (nrealfakewriters <= 0) 4492 nrealfakewriters = 1; 4493 } 4494 4495 if (nreaders >= 0) { 4496 nrealreaders = nreaders; 4497 } else { 4498 nrealreaders = num_online_cpus() - 2 - nreaders; 4499 if (nrealreaders <= 0) 4500 nrealreaders = 1; 4501 } 4502 rcu_torture_print_module_parms(cur_ops, "Start of test"); 4503 if (cur_ops->get_gp_data) 4504 cur_ops->get_gp_data(&flags, &gp_seq); 4505 start_gp_seq = gp_seq; 4506 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 4507 cur_ops->name, (long)gp_seq, flags); 4508 4509 /* Set up the freelist. */ 4510 4511 INIT_LIST_HEAD(&rcu_torture_freelist); 4512 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 4513 rcu_tortures[i].rtort_mbtest = 0; 4514 list_add_tail(&rcu_tortures[i].rtort_free, 4515 &rcu_torture_freelist); 4516 } 4517 4518 /* Initialize the statistics so that each run gets its own numbers. */ 4519 4520 rcu_torture_current = NULL; 4521 rcu_torture_current_version = 0; 4522 atomic_set(&n_rcu_torture_alloc, 0); 4523 atomic_set(&n_rcu_torture_alloc_fail, 0); 4524 atomic_set(&n_rcu_torture_free, 0); 4525 atomic_set(&n_rcu_torture_mberror, 0); 4526 atomic_set(&n_rcu_torture_mbchk_fail, 0); 4527 atomic_set(&n_rcu_torture_mbchk_tries, 0); 4528 atomic_set(&n_rcu_torture_error, 0); 4529 n_rcu_torture_barrier_error = 0; 4530 n_rcu_torture_boost_ktrerror = 0; 4531 n_rcu_torture_boost_failure = 0; 4532 n_rcu_torture_boosts = 0; 4533 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 4534 atomic_set(&rcu_torture_wcount[i], 0); 4535 for_each_possible_cpu(cpu) { 4536 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 4537 per_cpu(rcu_torture_count, cpu)[i] = 0; 4538 per_cpu(rcu_torture_batch, cpu)[i] = 0; 4539 } 4540 } 4541 err_segs_recorded = 0; 4542 rt_read_nsegs = 0; 4543 4544 /* Start up the kthreads. 
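 * From here on, any torture_init_error() hit branches to the unwind
 * label, which ends initialization and runs the full
 * rcu_torture_cleanup() to reap whatever has already been created.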
*/ 4545 4546 rcu_torture_write_types(); 4547 if (nrealfakewriters > 0) { 4548 fakewriter_tasks = kcalloc(nrealfakewriters, 4549 sizeof(fakewriter_tasks[0]), 4550 GFP_KERNEL); 4551 if (fakewriter_tasks == NULL) { 4552 TOROUT_ERRSTRING("out of memory"); 4553 firsterr = -ENOMEM; 4554 goto unwind; 4555 } 4556 } 4557 for (i = 0; i < nrealfakewriters; i++) { 4558 firsterr = torture_create_kthread(rcu_torture_fakewriter, 4559 NULL, fakewriter_tasks[i]); 4560 if (torture_init_error(firsterr)) 4561 goto unwind; 4562 } 4563 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 4564 GFP_KERNEL); 4565 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 4566 GFP_KERNEL); 4567 if (!reader_tasks || !rcu_torture_reader_mbchk) { 4568 TOROUT_ERRSTRING("out of memory"); 4569 firsterr = -ENOMEM; 4570 goto unwind; 4571 } 4572 for (i = 0; i < nrealreaders; i++) { 4573 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 4574 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 4575 reader_tasks[i]); 4576 if (torture_init_error(firsterr)) 4577 goto unwind; 4578 } 4579 4580 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 4581 writer_task); 4582 if (torture_init_error(firsterr)) 4583 goto unwind; 4584 4585 firsterr = rcu_torture_updown_init(); 4586 if (torture_init_error(firsterr)) 4587 goto unwind; 4588 nrealnocbers = nocbs_nthreads; 4589 if (WARN_ON(nrealnocbers < 0)) 4590 nrealnocbers = 1; 4591 if (WARN_ON(nocbs_toggle < 0)) 4592 nocbs_toggle = HZ; 4593 if (nrealnocbers > 0) { 4594 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 4595 if (nocb_tasks == NULL) { 4596 TOROUT_ERRSTRING("out of memory"); 4597 firsterr = -ENOMEM; 4598 goto unwind; 4599 } 4600 } else { 4601 nocb_tasks = NULL; 4602 } 4603 for (i = 0; i < nrealnocbers; i++) { 4604 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 4605 if (torture_init_error(firsterr)) 4606 goto unwind; 4607 } 4608 if (stat_interval > 0) { 4609 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 4610 stats_task); 4611 if (torture_init_error(firsterr)) 4612 goto unwind; 4613 } 4614 if (test_no_idle_hz && shuffle_interval > 0) { 4615 firsterr = torture_shuffle_init(shuffle_interval * HZ); 4616 if (torture_init_error(firsterr)) 4617 goto unwind; 4618 } 4619 if (stutter < 0) 4620 stutter = 0; 4621 if (stutter) { 4622 int t; 4623 4624 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 4625 firsterr = torture_stutter_init(stutter * HZ, t); 4626 if (torture_init_error(firsterr)) 4627 goto unwind; 4628 } 4629 if (fqs_duration < 0) 4630 fqs_duration = 0; 4631 if (fqs_holdoff < 0) 4632 fqs_holdoff = 0; 4633 if (fqs_duration && fqs_holdoff) { 4634 /* Create the fqs thread */ 4635 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 4636 fqs_task); 4637 if (torture_init_error(firsterr)) 4638 goto unwind; 4639 } 4640 if (test_boost_interval < 1) 4641 test_boost_interval = 1; 4642 if (test_boost_duration < 2) 4643 test_boost_duration = 2; 4644 if (rcu_torture_can_boost()) { 4645 4646 boost_starttime = jiffies + test_boost_interval * HZ; 4647 4648 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 4649 rcutorture_booster_init, 4650 rcutorture_booster_cleanup); 4651 rcutor_hp = firsterr; 4652 if (torture_init_error(firsterr)) 4653 goto unwind; 4654 } 4655 shutdown_jiffies = jiffies + shutdown_secs * HZ; 4656 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 4657 if (torture_init_error(firsterr)) 4658 goto unwind; 4659 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 4660 rcutorture_sync); 4661 if (torture_init_error(firsterr)) 4662 goto unwind; 4663 firsterr = rcu_torture_stall_init(); 4664 if (torture_init_error(firsterr)) 4665 goto unwind; 4666 firsterr = rcu_torture_fwd_prog_init(); 4667 if (torture_init_error(firsterr)) 4668 goto unwind; 4669 firsterr = rcu_torture_barrier_init(); 4670 if (torture_init_error(firsterr)) 4671 goto unwind; 4672 firsterr = rcu_torture_read_exit_init(); 4673 if (torture_init_error(firsterr)) 4674 goto unwind; 4675 if (preempt_duration > 0) { 4676 firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task); 4677 if (torture_init_error(firsterr)) 4678 goto unwind; 4679 } 4680 if (object_debug) 4681 rcu_test_debug_objects(); 4682 4683 if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) 4684 cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); 4685 4686 if (gpwrap_lag && cur_ops->set_gpwrap_lag) { 4687 firsterr = rcu_gpwrap_lag_init(); 4688 if (torture_init_error(firsterr)) 4689 goto unwind; 4690 } 4691 4692 torture_init_end(); 4693 return 0; 4694 4695 unwind: 4696 torture_init_end(); 4697 rcu_torture_cleanup(); 4698 if (shutdown_secs) { 4699 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 4700 kernel_power_off(); 4701 } 4702 return firsterr; 4703 } 4704 4705 module_init(rcu_torture_init); 4706 module_exit(rcu_torture_cleanup); 4707