1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/rcu_notifier.h> 25 #include <linux/interrupt.h> 26 #include <linux/sched/signal.h> 27 #include <uapi/linux/sched/types.h> 28 #include <linux/atomic.h> 29 #include <linux/bitops.h> 30 #include <linux/completion.h> 31 #include <linux/moduleparam.h> 32 #include <linux/percpu.h> 33 #include <linux/notifier.h> 34 #include <linux/reboot.h> 35 #include <linux/freezer.h> 36 #include <linux/cpu.h> 37 #include <linux/delay.h> 38 #include <linux/stat.h> 39 #include <linux/srcu.h> 40 #include <linux/slab.h> 41 #include <linux/trace_clock.h> 42 #include <asm/byteorder.h> 43 #include <linux/torture.h> 44 #include <linux/vmalloc.h> 45 #include <linux/sched/debug.h> 46 #include <linux/sched/sysctl.h> 47 #include <linux/oom.h> 48 #include <linux/tick.h> 49 #include <linux/rcupdate_trace.h> 50 #include <linux/nmi.h> 51 52 #include "rcu.h" 53 54 MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility"); 55 MODULE_LICENSE("GPL"); 56 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 57 58 // Bits for ->extendables field, extendables param, and related definitions. 59 #define RCUTORTURE_RDR_SHIFT_1 8 // Put SRCU index in upper bits. 60 #define RCUTORTURE_RDR_MASK_1 (0xff << RCUTORTURE_RDR_SHIFT_1) 61 #define RCUTORTURE_RDR_SHIFT_2 16 // Put SRCU index in upper bits. 62 #define RCUTORTURE_RDR_MASK_2 (0xff << RCUTORTURE_RDR_SHIFT_2) 63 #define RCUTORTURE_RDR_BH 0x01 // Extend readers by disabling bh. 64 #define RCUTORTURE_RDR_IRQ 0x02 // ... disabling interrupts. 65 #define RCUTORTURE_RDR_PREEMPT 0x04 // ... disabling preemption. 66 #define RCUTORTURE_RDR_RBH 0x08 // ... rcu_read_lock_bh(). 67 #define RCUTORTURE_RDR_SCHED 0x10 // ... rcu_read_lock_sched(). 68 #define RCUTORTURE_RDR_RCU_1 0x20 // ... entering another RCU reader. 69 #define RCUTORTURE_RDR_RCU_2 0x40 // ... entering another RCU reader. 70 #define RCUTORTURE_RDR_UPDOWN 0x80 // ... up-read from task, down-read from timer. 71 // Note: Manual start, automatic end. 72 #define RCUTORTURE_RDR_NBITS 8 // Number of bits defined above. 73 #define RCUTORTURE_MAX_EXTEND \ 74 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 75 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) // Intentionally omit RCUTORTURE_RDR_UPDOWN. 76 #define RCUTORTURE_RDR_ALLBITS \ 77 (RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \ 78 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2) 79 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 80 /* Must be power of two minus one. 
*/
81 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
82
83 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
84 "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
85 torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
86 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
87 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
88 torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
89 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
90 torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
91 torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
92 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
93 torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
94 torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
95 torture_param(bool, gp_cond_exp_full, false,
96 "Use conditional/async full-state expedited GP wait primitives");
97 torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
98 "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
99 torture_param(int, gp_cond_wi_exp, 128,
100 "Wait interval for expedited conditional grace periods, us (default 128 us)");
101 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
102 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
103 torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
104 torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
105 torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
106 torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
107 torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
108 "Wait interval for normal polled grace periods, us (default 16 jiffies)");
109 torture_param(int, gp_poll_wi_exp, 128,
110 "Wait interval for expedited polled grace periods, us (default 128 us)");
111 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
112 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
113 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
114 torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
115 torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
116 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
117 torture_param(int, nreaders, -1, "Number of RCU reader threads");
118 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
119 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
120 torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
121 torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
122 torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
123 torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
124 torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
125 torture_param(int,
nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable"); 126 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)"); 127 torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable"); 128 torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)"); 129 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)"); 130 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable"); 131 torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit."); 132 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 133 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 134 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 135 torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s)."); 136 torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall."); 137 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 138 torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); 139 torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one."); 140 torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s)."); 141 torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s"); 142 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 143 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 144 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds."); 145 torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds."); 146 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds."); 147 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable."); 148 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs"); 149 torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario."); 150 torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); 151 152 static char *torture_type = "rcu"; 153 module_param(torture_type, charp, 0444); 154 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 155 156 static int nrealnocbers; 157 static int nrealreaders; 158 static int nrealfakewriters; 159 static struct task_struct *writer_task; 160 static struct task_struct **fakewriter_tasks; 161 static struct task_struct **reader_tasks; 162 static struct task_struct *updown_task; 163 static struct task_struct **nocb_tasks; 164 static struct task_struct *stats_task; 165 static struct task_struct *fqs_task; 166 static struct task_struct *boost_tasks[NR_CPUS]; 167 static struct task_struct *stall_task; 168 static struct task_struct **fwd_prog_tasks; 169 static struct task_struct **barrier_cbs_tasks; 170 static struct task_struct *barrier_task; 171 static struct task_struct *read_exit_task; 172 static struct task_struct *preempt_task; 173 174 #define RCU_TORTURE_PIPE_LEN 10 175 176 // Mailbox-like structure to check RCU global memory ordering. 
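// The ->rtc_ready handoff is performed with smp_store_release() in
// rcu_torture_pipe_update_one() below, and the n_rcu_torture_mbchk_tries and
// n_rcu_torture_mbchk_fail counters tally the resulting ordering checks and
// any failures they detect.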
177 struct rcu_torture_reader_check { 178 unsigned long rtc_myloops; 179 int rtc_chkrdr; 180 unsigned long rtc_chkloops; 181 int rtc_ready; 182 struct rcu_torture_reader_check *rtc_assigner; 183 } ____cacheline_internodealigned_in_smp; 184 185 // Update-side data structure used to check RCU readers. 186 struct rcu_torture { 187 struct rcu_head rtort_rcu; 188 int rtort_pipe_count; 189 struct list_head rtort_free; 190 int rtort_mbtest; 191 struct rcu_torture_reader_check *rtort_chkp; 192 }; 193 194 static LIST_HEAD(rcu_torture_freelist); 195 static struct rcu_torture __rcu *rcu_torture_current; 196 static unsigned long rcu_torture_current_version; 197 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 198 static DEFINE_SPINLOCK(rcu_torture_lock); 199 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 200 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 201 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 202 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 203 static atomic_t n_rcu_torture_alloc; 204 static atomic_t n_rcu_torture_alloc_fail; 205 static atomic_t n_rcu_torture_free; 206 static atomic_t n_rcu_torture_mberror; 207 static atomic_t n_rcu_torture_mbchk_fail; 208 static atomic_t n_rcu_torture_mbchk_tries; 209 static atomic_t n_rcu_torture_error; 210 static long n_rcu_torture_barrier_error; 211 static long n_rcu_torture_boost_ktrerror; 212 static long n_rcu_torture_boost_failure; 213 static long n_rcu_torture_boosts; 214 static atomic_long_t n_rcu_torture_timers; 215 static long n_barrier_attempts; 216 static long n_barrier_successes; /* did rcu_barrier test succeed? */ 217 static unsigned long n_read_exits; 218 static struct list_head rcu_torture_removed; 219 static unsigned long shutdown_jiffies; 220 static unsigned long start_gp_seq; 221 static atomic_long_t n_nocb_offload; 222 static atomic_long_t n_nocb_deoffload; 223 224 static int rcu_torture_writer_state; 225 #define RTWS_FIXED_DELAY 0 226 #define RTWS_DELAY 1 227 #define RTWS_REPLACE 2 228 #define RTWS_DEF_FREE 3 229 #define RTWS_EXP_SYNC 4 230 #define RTWS_COND_GET 5 231 #define RTWS_COND_GET_FULL 6 232 #define RTWS_COND_GET_EXP 7 233 #define RTWS_COND_GET_EXP_FULL 8 234 #define RTWS_COND_SYNC 9 235 #define RTWS_COND_SYNC_FULL 10 236 #define RTWS_COND_SYNC_EXP 11 237 #define RTWS_COND_SYNC_EXP_FULL 12 238 #define RTWS_POLL_GET 13 239 #define RTWS_POLL_GET_FULL 14 240 #define RTWS_POLL_GET_EXP 15 241 #define RTWS_POLL_GET_EXP_FULL 16 242 #define RTWS_POLL_WAIT 17 243 #define RTWS_POLL_WAIT_FULL 18 244 #define RTWS_POLL_WAIT_EXP 19 245 #define RTWS_POLL_WAIT_EXP_FULL 20 246 #define RTWS_SYNC 21 247 #define RTWS_STUTTER 22 248 #define RTWS_STOPPING 23 249 static const char * const rcu_torture_writer_state_names[] = { 250 "RTWS_FIXED_DELAY", 251 "RTWS_DELAY", 252 "RTWS_REPLACE", 253 "RTWS_DEF_FREE", 254 "RTWS_EXP_SYNC", 255 "RTWS_COND_GET", 256 "RTWS_COND_GET_FULL", 257 "RTWS_COND_GET_EXP", 258 "RTWS_COND_GET_EXP_FULL", 259 "RTWS_COND_SYNC", 260 "RTWS_COND_SYNC_FULL", 261 "RTWS_COND_SYNC_EXP", 262 "RTWS_COND_SYNC_EXP_FULL", 263 "RTWS_POLL_GET", 264 "RTWS_POLL_GET_FULL", 265 "RTWS_POLL_GET_EXP", 266 "RTWS_POLL_GET_EXP_FULL", 267 "RTWS_POLL_WAIT", 268 "RTWS_POLL_WAIT_FULL", 269 "RTWS_POLL_WAIT_EXP", 270 "RTWS_POLL_WAIT_EXP_FULL", 271 "RTWS_SYNC", 272 "RTWS_STUTTER", 273 "RTWS_STOPPING", 274 }; 275 276 /* Record reader segment types and duration for first failing read. 
*/ 277 struct rt_read_seg { 278 int rt_readstate; 279 unsigned long rt_delay_jiffies; 280 unsigned long rt_delay_ms; 281 unsigned long rt_delay_us; 282 bool rt_preempted; 283 int rt_cpu; 284 int rt_end_cpu; 285 unsigned long long rt_gp_seq; 286 unsigned long long rt_gp_seq_end; 287 u64 rt_ts; 288 }; 289 static int err_segs_recorded; 290 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 291 static int rt_read_nsegs; 292 static int rt_read_preempted; 293 294 static const char *rcu_torture_writer_state_getname(void) 295 { 296 unsigned int i = READ_ONCE(rcu_torture_writer_state); 297 298 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 299 return "???"; 300 return rcu_torture_writer_state_names[i]; 301 } 302 303 #ifdef CONFIG_RCU_TRACE 304 static u64 notrace rcu_trace_clock_local(void) 305 { 306 u64 ts = trace_clock_local(); 307 308 (void)do_div(ts, NSEC_PER_USEC); 309 return ts; 310 } 311 #else /* #ifdef CONFIG_RCU_TRACE */ 312 static u64 notrace rcu_trace_clock_local(void) 313 { 314 return 0ULL; 315 } 316 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 317 318 /* 319 * Stop aggressive CPU-hog tests a bit before the end of the test in order 320 * to avoid interfering with test shutdown. 321 */ 322 static bool shutdown_time_arrived(void) 323 { 324 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 325 } 326 327 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 328 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 329 /* and boost task create/destroy. */ 330 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 331 static bool barrier_phase; /* Test phase. */ 332 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 333 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 334 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 335 336 static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 337 338 /* 339 * Allocate an element from the rcu_tortures pool. 340 */ 341 static struct rcu_torture * 342 rcu_torture_alloc(void) 343 { 344 struct list_head *p; 345 346 spin_lock_bh(&rcu_torture_lock); 347 if (list_empty(&rcu_torture_freelist)) { 348 atomic_inc(&n_rcu_torture_alloc_fail); 349 spin_unlock_bh(&rcu_torture_lock); 350 return NULL; 351 } 352 atomic_inc(&n_rcu_torture_alloc); 353 p = rcu_torture_freelist.next; 354 list_del_init(p); 355 spin_unlock_bh(&rcu_torture_lock); 356 return container_of(p, struct rcu_torture, rtort_free); 357 } 358 359 /* 360 * Free an element to the rcu_tortures pool. 361 */ 362 static void 363 rcu_torture_free(struct rcu_torture *p) 364 { 365 atomic_inc(&n_rcu_torture_free); 366 spin_lock_bh(&rcu_torture_lock); 367 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 368 spin_unlock_bh(&rcu_torture_lock); 369 } 370 371 /* 372 * Operations vector for selecting different types of tests. 373 */ 374 375 struct rcu_torture_ops { 376 int ttype; 377 void (*init)(void); 378 void (*cleanup)(void); 379 int (*readlock)(void); 380 void (*read_delay)(struct torture_random_state *rrsp, 381 struct rt_read_seg *rtrsp); 382 void (*readunlock)(int idx); 383 int (*readlock_held)(void); // lockdep. 384 int (*readlock_nesting)(void); // actual nesting, if available, -1 if not. 
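// Optional srcu_down_read()-style readers whose start and end can occur in
// different contexts (see the RCUTORTURE_RDR_UPDOWN comment above: manual
// start, automatic end, and the n_up_down module parameter).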
385 int (*down_read)(void); 386 void (*up_read)(int idx); 387 unsigned long (*get_gp_seq)(void); 388 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 389 void (*deferred_free)(struct rcu_torture *p); 390 void (*sync)(void); 391 void (*exp_sync)(void); 392 void (*exp_current)(void); 393 unsigned long (*get_gp_state_exp)(void); 394 unsigned long (*start_gp_poll_exp)(void); 395 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp); 396 bool (*poll_gp_state_exp)(unsigned long oldstate); 397 void (*cond_sync_exp)(unsigned long oldstate); 398 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp); 399 unsigned long (*get_comp_state)(void); 400 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp); 401 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2); 402 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2); 403 unsigned long (*get_gp_state)(void); 404 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp); 405 unsigned long (*start_gp_poll)(void); 406 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp); 407 bool (*poll_gp_state)(unsigned long oldstate); 408 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp); 409 bool (*poll_need_2gp)(bool poll, bool poll_full); 410 void (*cond_sync)(unsigned long oldstate); 411 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp); 412 int poll_active; 413 int poll_active_full; 414 call_rcu_func_t call; 415 void (*cb_barrier)(void); 416 void (*fqs)(void); 417 void (*stats)(void); 418 void (*gp_kthread_dbg)(void); 419 bool (*check_boost_failed)(unsigned long gp_state, int *cpup); 420 int (*stall_dur)(void); 421 void (*get_gp_data)(int *flags, unsigned long *gp_seq); 422 void (*gp_slow_register)(atomic_t *rgssp); 423 void (*gp_slow_unregister)(atomic_t *rgssp); 424 bool (*reader_blocked)(void); 425 unsigned long long (*gather_gp_seqs)(void); 426 void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len); 427 void (*set_gpwrap_lag)(unsigned long lag); 428 int (*get_gpwrap_count)(int cpu); 429 long cbflood_max; 430 int irq_capable; 431 int can_boost; 432 int extendables; 433 int slow_gps; 434 int no_pi_lock; 435 int debug_objects; 436 int start_poll_irqsoff; 437 int have_up_down; 438 const char *name; 439 }; 440 441 static struct rcu_torture_ops *cur_ops; 442 443 /* 444 * Definitions for rcu torture testing. 445 */ 446 447 static int torture_readlock_not_held(void) 448 { 449 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); 450 } 451 452 static int rcu_torture_read_lock(void) 453 { 454 rcu_read_lock(); 455 return 0; 456 } 457 458 static void 459 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 460 { 461 unsigned long started; 462 unsigned long completed; 463 const unsigned long shortdelay_us = 200; 464 unsigned long longdelay_ms = 300; 465 unsigned long long ts; 466 467 /* We want a short delay sometimes to make a reader delay the grace 468 * period, and we want a long delay occasionally to trigger 469 * force_quiescent_state. */ 470 471 if (!atomic_read(&rcu_fwd_cb_nodelay) && 472 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 473 started = cur_ops->get_gp_seq(); 474 ts = rcu_trace_clock_local(); 475 if ((preempt_count() & HARDIRQ_MASK) || softirq_count()) 476 longdelay_ms = 5; /* Avoid triggering BH limits. 
*/ 477 mdelay(longdelay_ms); 478 rtrsp->rt_delay_ms = longdelay_ms; 479 completed = cur_ops->get_gp_seq(); 480 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 481 started, completed); 482 } 483 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 484 udelay(shortdelay_us); 485 rtrsp->rt_delay_us = shortdelay_us; 486 } 487 if (!preempt_count() && 488 !(torture_random(rrsp) % (nrealreaders * 500))) 489 torture_preempt_schedule(); /* QS only if preemptible. */ 490 } 491 492 static void rcu_torture_read_unlock(int idx) 493 { 494 rcu_read_unlock(); 495 } 496 497 static int rcu_torture_readlock_nesting(void) 498 { 499 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) 500 return rcu_preempt_depth(); 501 if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) 502 return (preempt_count() & PREEMPT_MASK); 503 return -1; 504 } 505 506 /* 507 * Update callback in the pipe. This should be invoked after a grace period. 508 */ 509 static bool 510 rcu_torture_pipe_update_one(struct rcu_torture *rp) 511 { 512 int i; 513 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); 514 515 if (rtrcp) { 516 WRITE_ONCE(rp->rtort_chkp, NULL); 517 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). 518 } 519 i = rp->rtort_pipe_count; 520 if (i > RCU_TORTURE_PIPE_LEN) 521 i = RCU_TORTURE_PIPE_LEN; 522 atomic_inc(&rcu_torture_wcount[i]); 523 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 524 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); 525 if (i + 1 >= RCU_TORTURE_PIPE_LEN) { 526 rp->rtort_mbtest = 0; 527 return true; 528 } 529 return false; 530 } 531 532 /* 533 * Update all callbacks in the pipe. Suitable for synchronous grace-period 534 * primitives. 535 */ 536 static void 537 rcu_torture_pipe_update(struct rcu_torture *old_rp) 538 { 539 struct rcu_torture *rp; 540 struct rcu_torture *rp1; 541 542 if (old_rp) 543 list_add(&old_rp->rtort_free, &rcu_torture_removed); 544 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 545 if (rcu_torture_pipe_update_one(rp)) { 546 list_del(&rp->rtort_free); 547 rcu_torture_free(rp); 548 } 549 } 550 } 551 552 static void 553 rcu_torture_cb(struct rcu_head *p) 554 { 555 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 556 557 if (torture_must_stop_irq()) { 558 /* Test is ending, just drop callbacks on the floor. */ 559 /* The next initialization will pick up the pieces. 
*/
560 return;
561 }
562 if (rcu_torture_pipe_update_one(rp))
563 rcu_torture_free(rp);
564 else
565 cur_ops->deferred_free(rp);
566 }
567
568 static unsigned long rcu_no_completed(void)
569 {
570 return 0;
571 }
572
573 static void rcu_torture_deferred_free(struct rcu_torture *p)
574 {
575 call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
576 }
577
578 static void rcu_sync_torture_init(void)
579 {
580 INIT_LIST_HEAD(&rcu_torture_removed);
581 }
582
583 static bool rcu_poll_need_2gp(bool poll, bool poll_full)
584 {
585 return poll;
586 }
587
588 static struct rcu_torture_ops rcu_ops = {
589 .ttype = RCU_FLAVOR,
590 .init = rcu_sync_torture_init,
591 .readlock = rcu_torture_read_lock,
592 .read_delay = rcu_read_delay,
593 .readunlock = rcu_torture_read_unlock,
594 .readlock_held = torture_readlock_not_held,
595 .readlock_nesting = rcu_torture_readlock_nesting,
596 .get_gp_seq = rcu_get_gp_seq,
597 .gp_diff = rcu_seq_diff,
598 .deferred_free = rcu_torture_deferred_free,
599 .sync = synchronize_rcu,
600 .exp_sync = synchronize_rcu_expedited,
601 .same_gp_state = same_state_synchronize_rcu,
602 .same_gp_state_full = same_state_synchronize_rcu_full,
603 .get_comp_state = get_completed_synchronize_rcu,
604 .get_comp_state_full = get_completed_synchronize_rcu_full,
605 .get_gp_state = get_state_synchronize_rcu,
606 .get_gp_state_full = get_state_synchronize_rcu_full,
607 .start_gp_poll = start_poll_synchronize_rcu,
608 .start_gp_poll_full = start_poll_synchronize_rcu_full,
609 .poll_gp_state = poll_state_synchronize_rcu,
610 .poll_gp_state_full = poll_state_synchronize_rcu_full,
611 .poll_need_2gp = rcu_poll_need_2gp,
612 .cond_sync = cond_synchronize_rcu,
613 .cond_sync_full = cond_synchronize_rcu_full,
614 .poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
615 .poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
616 .get_gp_state_exp = get_state_synchronize_rcu,
617 .start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
618 .start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
619 .poll_gp_state_exp = poll_state_synchronize_rcu,
620 .cond_sync_exp = cond_synchronize_rcu_expedited,
621 .cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
622 .call = call_rcu_hurry,
623 .cb_barrier = rcu_barrier,
624 .fqs = rcu_force_quiescent_state,
625 .gp_kthread_dbg = show_rcu_gp_kthreads,
626 .check_boost_failed = rcu_check_boost_fail,
627 .stall_dur = rcu_jiffies_till_stall_check,
628 .get_gp_data = rcutorture_get_gp_data,
629 .gp_slow_register = rcu_gp_slow_register,
630 .gp_slow_unregister = rcu_gp_slow_unregister,
631 .reader_blocked = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
632 ? has_rcu_reader_blocked
633 : NULL,
634 .gather_gp_seqs = rcutorture_gather_gp_seqs,
635 .format_gp_seqs = rcutorture_format_gp_seqs,
636 .set_gpwrap_lag = rcu_set_gpwrap_lag,
637 .get_gpwrap_count = rcu_get_gpwrap_count,
638 .irq_capable = 1,
639 .can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
640 .extendables = RCUTORTURE_MAX_EXTEND,
641 .debug_objects = 1,
642 .start_poll_irqsoff = 1,
643 .name = "rcu"
644 };
645
646 /*
647 * Don't even think about trying any of these in real life!!!
648 * The names include "busted", and they really mean it!
649 * The only purpose of these functions is to provide a buggy RCU
650 * implementation to make sure that rcutorture correctly emits
651 * buggy-RCU error messages.
652 */
653 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
654 {
655 /* This is a deliberate bug for testing purposes only!
*/ 656 rcu_torture_cb(&p->rtort_rcu); 657 } 658 659 static void synchronize_rcu_busted(void) 660 { 661 /* This is a deliberate bug for testing purposes only! */ 662 } 663 664 static void 665 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 666 { 667 /* This is a deliberate bug for testing purposes only! */ 668 func(head); 669 } 670 671 static struct rcu_torture_ops rcu_busted_ops = { 672 .ttype = INVALID_RCU_FLAVOR, 673 .init = rcu_sync_torture_init, 674 .readlock = rcu_torture_read_lock, 675 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 676 .readunlock = rcu_torture_read_unlock, 677 .readlock_held = torture_readlock_not_held, 678 .get_gp_seq = rcu_no_completed, 679 .deferred_free = rcu_busted_torture_deferred_free, 680 .sync = synchronize_rcu_busted, 681 .exp_sync = synchronize_rcu_busted, 682 .call = call_rcu_busted, 683 .gather_gp_seqs = rcutorture_gather_gp_seqs, 684 .format_gp_seqs = rcutorture_format_gp_seqs, 685 .irq_capable = 1, 686 .extendables = RCUTORTURE_MAX_EXTEND, 687 .name = "busted" 688 }; 689 690 /* 691 * Definitions for srcu torture testing. 692 */ 693 694 DEFINE_STATIC_SRCU(srcu_ctl); 695 DEFINE_STATIC_SRCU_FAST(srcu_ctlf); 696 DEFINE_STATIC_SRCU_FAST_UPDOWN(srcu_ctlfud); 697 static struct srcu_struct srcu_ctld; 698 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 699 static struct rcu_torture_ops srcud_ops; 700 701 static void srcu_torture_init(void) 702 { 703 rcu_sync_torture_init(); 704 if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL)) 705 VERBOSE_TOROUT_STRING("srcu_torture_init normal SRCU"); 706 if (reader_flavor & SRCU_READ_FLAVOR_NMI) 707 VERBOSE_TOROUT_STRING("srcu_torture_init NMI-safe SRCU"); 708 if (reader_flavor & SRCU_READ_FLAVOR_FAST) { 709 srcu_ctlp = &srcu_ctlf; 710 VERBOSE_TOROUT_STRING("srcu_torture_init fast SRCU"); 711 } 712 if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) { 713 srcu_ctlp = &srcu_ctlfud; 714 VERBOSE_TOROUT_STRING("srcu_torture_init fast-up/down SRCU"); 715 } 716 } 717 718 static void srcu_get_gp_data(int *flags, unsigned long *gp_seq) 719 { 720 srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq); 721 } 722 723 static int srcu_torture_read_lock(void) 724 { 725 int idx; 726 struct srcu_ctr __percpu *scp; 727 int ret = 0; 728 729 WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL); 730 731 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) { 732 idx = srcu_read_lock(srcu_ctlp); 733 WARN_ON_ONCE(idx & ~0x1); 734 ret += idx; 735 } 736 if (reader_flavor & SRCU_READ_FLAVOR_NMI) { 737 idx = srcu_read_lock_nmisafe(srcu_ctlp); 738 WARN_ON_ONCE(idx & ~0x1); 739 ret += idx << 1; 740 } 741 if (reader_flavor & SRCU_READ_FLAVOR_FAST) { 742 scp = srcu_read_lock_fast(srcu_ctlp); 743 idx = __srcu_ptr_to_ctr(srcu_ctlp, scp); 744 WARN_ON_ONCE(idx & ~0x1); 745 ret += idx << 2; 746 } 747 if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) { 748 scp = srcu_read_lock_fast_updown(srcu_ctlp); 749 idx = __srcu_ptr_to_ctr(srcu_ctlp, scp); 750 WARN_ON_ONCE(idx & ~0x1); 751 ret += idx << 3; 752 } 753 return ret; 754 } 755 756 static void 757 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 758 { 759 long delay; 760 const long uspertick = 1000000 / HZ; 761 const long longdelay = 10; 762 763 /* We want there to be long-running readers, but not all the time. 
*/ 764 765 delay = torture_random(rrsp) % 766 (nrealreaders * 2 * longdelay * uspertick); 767 if (!delay && in_task()) { 768 schedule_timeout_interruptible(longdelay); 769 rtrsp->rt_delay_jiffies = longdelay; 770 } else { 771 rcu_read_delay(rrsp, rtrsp); 772 } 773 } 774 775 static void srcu_torture_read_unlock(int idx) 776 { 777 WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1))); 778 if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) 779 srcu_read_unlock_fast_updown(srcu_ctlp, 780 __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3)); 781 if (reader_flavor & SRCU_READ_FLAVOR_FAST) 782 srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x4) >> 2)); 783 if (reader_flavor & SRCU_READ_FLAVOR_NMI) 784 srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1); 785 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) 786 srcu_read_unlock(srcu_ctlp, idx & 0x1); 787 } 788 789 static int torture_srcu_read_lock_held(void) 790 { 791 return srcu_read_lock_held(srcu_ctlp); 792 } 793 794 static bool srcu_torture_have_up_down(void) 795 { 796 int rf = reader_flavor; 797 798 if (!rf) 799 rf = SRCU_READ_FLAVOR_NORMAL; 800 return !!(cur_ops->have_up_down & rf); 801 } 802 803 static int srcu_torture_down_read(void) 804 { 805 int idx; 806 struct srcu_ctr __percpu *scp; 807 808 WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL); 809 WARN_ON_ONCE(reader_flavor & (reader_flavor - 1)); 810 811 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) { 812 idx = srcu_down_read(srcu_ctlp); 813 WARN_ON_ONCE(idx & ~0x1); 814 return idx; 815 } 816 if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) { 817 scp = srcu_down_read_fast(srcu_ctlp); 818 idx = __srcu_ptr_to_ctr(srcu_ctlp, scp); 819 WARN_ON_ONCE(idx & ~0x1); 820 return idx << 3; 821 } 822 WARN_ON_ONCE(1); 823 return 0; 824 } 825 826 static void srcu_torture_up_read(int idx) 827 { 828 WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1))); 829 if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) 830 srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3)); 831 else if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || 832 !(reader_flavor & SRCU_READ_FLAVOR_ALL)) 833 srcu_up_read(srcu_ctlp, idx & 0x1); 834 else 835 WARN_ON_ONCE(1); 836 } 837 838 static unsigned long srcu_torture_completed(void) 839 { 840 return srcu_batches_completed(srcu_ctlp); 841 } 842 843 static void srcu_torture_deferred_free(struct rcu_torture *rp) 844 { 845 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 846 } 847 848 static void srcu_torture_synchronize(void) 849 { 850 synchronize_srcu(srcu_ctlp); 851 } 852 853 static unsigned long srcu_torture_get_gp_state(void) 854 { 855 return get_state_synchronize_srcu(srcu_ctlp); 856 } 857 858 static unsigned long srcu_torture_start_gp_poll(void) 859 { 860 return start_poll_synchronize_srcu(srcu_ctlp); 861 } 862 863 static bool srcu_torture_poll_gp_state(unsigned long oldstate) 864 { 865 return poll_state_synchronize_srcu(srcu_ctlp, oldstate); 866 } 867 868 static void srcu_torture_call(struct rcu_head *head, 869 rcu_callback_t func) 870 { 871 call_srcu(srcu_ctlp, head, func); 872 } 873 874 static void srcu_torture_barrier(void) 875 { 876 srcu_barrier(srcu_ctlp); 877 } 878 879 static void srcu_torture_stats(void) 880 { 881 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 882 } 883 884 static void srcu_torture_synchronize_expedited(void) 885 { 886 
synchronize_srcu_expedited(srcu_ctlp); 887 } 888 889 static void srcu_torture_expedite_current(void) 890 { 891 srcu_expedite_current(srcu_ctlp); 892 } 893 894 static struct rcu_torture_ops srcu_ops = { 895 .ttype = SRCU_FLAVOR, 896 .init = srcu_torture_init, 897 .readlock = srcu_torture_read_lock, 898 .read_delay = srcu_read_delay, 899 .readunlock = srcu_torture_read_unlock, 900 .down_read = srcu_torture_down_read, 901 .up_read = srcu_torture_up_read, 902 .readlock_held = torture_srcu_read_lock_held, 903 .get_gp_seq = srcu_torture_completed, 904 .gp_diff = rcu_seq_diff, 905 .deferred_free = srcu_torture_deferred_free, 906 .sync = srcu_torture_synchronize, 907 .exp_sync = srcu_torture_synchronize_expedited, 908 .exp_current = srcu_torture_expedite_current, 909 .same_gp_state = same_state_synchronize_srcu, 910 .get_comp_state = get_completed_synchronize_srcu, 911 .get_gp_state = srcu_torture_get_gp_state, 912 .start_gp_poll = srcu_torture_start_gp_poll, 913 .poll_gp_state = srcu_torture_poll_gp_state, 914 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE, 915 .call = srcu_torture_call, 916 .cb_barrier = srcu_torture_barrier, 917 .stats = srcu_torture_stats, 918 .get_gp_data = srcu_get_gp_data, 919 .cbflood_max = 50000, 920 .irq_capable = 1, 921 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 922 .debug_objects = 1, 923 .have_up_down = IS_ENABLED(CONFIG_TINY_SRCU) 924 ? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN, 925 .name = "srcu" 926 }; 927 928 static void srcud_torture_init(void) 929 { 930 rcu_sync_torture_init(); 931 if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL)) { 932 WARN_ON(init_srcu_struct(&srcu_ctld)); 933 VERBOSE_TOROUT_STRING("srcud_torture_init normal SRCU"); 934 } else if (reader_flavor & SRCU_READ_FLAVOR_NMI) { 935 WARN_ON(init_srcu_struct(&srcu_ctld)); 936 VERBOSE_TOROUT_STRING("srcud_torture_init NMI-safe SRCU"); 937 } else if (reader_flavor & SRCU_READ_FLAVOR_FAST) { 938 WARN_ON(init_srcu_struct_fast(&srcu_ctld)); 939 VERBOSE_TOROUT_STRING("srcud_torture_init fast SRCU"); 940 } else if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) { 941 WARN_ON(init_srcu_struct_fast_updown(&srcu_ctld)); 942 VERBOSE_TOROUT_STRING("srcud_torture_init fast-up/down SRCU"); 943 } else { 944 WARN_ON(init_srcu_struct(&srcu_ctld)); 945 } 946 srcu_ctlp = &srcu_ctld; 947 } 948 949 static void srcu_torture_cleanup(void) 950 { 951 cleanup_srcu_struct(&srcu_ctld); 952 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 953 } 954 955 /* As above, but dynamically allocated. 
*/ 956 static struct rcu_torture_ops srcud_ops = { 957 .ttype = SRCU_FLAVOR, 958 .init = srcud_torture_init, 959 .cleanup = srcu_torture_cleanup, 960 .readlock = srcu_torture_read_lock, 961 .read_delay = srcu_read_delay, 962 .readunlock = srcu_torture_read_unlock, 963 .readlock_held = torture_srcu_read_lock_held, 964 .down_read = srcu_torture_down_read, 965 .up_read = srcu_torture_up_read, 966 .get_gp_seq = srcu_torture_completed, 967 .gp_diff = rcu_seq_diff, 968 .deferred_free = srcu_torture_deferred_free, 969 .sync = srcu_torture_synchronize, 970 .exp_sync = srcu_torture_synchronize_expedited, 971 .exp_current = srcu_torture_expedite_current, 972 .same_gp_state = same_state_synchronize_srcu, 973 .get_comp_state = get_completed_synchronize_srcu, 974 .get_gp_state = srcu_torture_get_gp_state, 975 .start_gp_poll = srcu_torture_start_gp_poll, 976 .poll_gp_state = srcu_torture_poll_gp_state, 977 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE, 978 .call = srcu_torture_call, 979 .cb_barrier = srcu_torture_barrier, 980 .stats = srcu_torture_stats, 981 .get_gp_data = srcu_get_gp_data, 982 .cbflood_max = 50000, 983 .irq_capable = 1, 984 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 985 .debug_objects = 1, 986 .have_up_down = IS_ENABLED(CONFIG_TINY_SRCU) 987 ? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN, 988 .name = "srcud" 989 }; 990 991 /* As above, but broken due to inappropriate reader extension. */ 992 static struct rcu_torture_ops busted_srcud_ops = { 993 .ttype = SRCU_FLAVOR, 994 .init = srcu_torture_init, 995 .cleanup = srcu_torture_cleanup, 996 .readlock = srcu_torture_read_lock, 997 .read_delay = rcu_read_delay, 998 .readunlock = srcu_torture_read_unlock, 999 .readlock_held = torture_srcu_read_lock_held, 1000 .get_gp_seq = srcu_torture_completed, 1001 .deferred_free = srcu_torture_deferred_free, 1002 .sync = srcu_torture_synchronize, 1003 .exp_sync = srcu_torture_synchronize_expedited, 1004 .call = srcu_torture_call, 1005 .cb_barrier = srcu_torture_barrier, 1006 .stats = srcu_torture_stats, 1007 .irq_capable = 1, 1008 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 1009 .extendables = RCUTORTURE_MAX_EXTEND, 1010 .name = "busted_srcud" 1011 }; 1012 1013 /* 1014 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 1015 * This implementation does not work well with CPU hotplug nor 1016 * with rcutorture's shuffling. 1017 */ 1018 1019 static void synchronize_rcu_trivial(void) 1020 { 1021 int cpu; 1022 1023 for_each_online_cpu(cpu) { 1024 torture_sched_setaffinity(current->pid, cpumask_of(cpu), true); 1025 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 1026 } 1027 } 1028 1029 static void rcu_sync_torture_init_trivial(void) 1030 { 1031 rcu_sync_torture_init(); 1032 // if (onoff_interval || shuffle_interval) { 1033 if (WARN_ONCE(onoff_interval || shuffle_interval, "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero", __func__, onoff_interval, shuffle_interval)) { 1034 onoff_interval = 0; 1035 shuffle_interval = 0; 1036 } 1037 } 1038 1039 static int rcu_torture_read_lock_trivial(void) 1040 { 1041 preempt_disable(); 1042 return 0; 1043 } 1044 1045 static void rcu_torture_read_unlock_trivial(int idx) 1046 { 1047 preempt_enable(); 1048 } 1049 1050 static struct rcu_torture_ops trivial_ops = { 1051 .ttype = RCU_TRIVIAL_FLAVOR, 1052 .init = rcu_sync_torture_init_trivial, 1053 .readlock = rcu_torture_read_lock_trivial, 1054 .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ 1055 .readunlock = rcu_torture_read_unlock_trivial, 1056 .readlock_held = torture_readlock_not_held, 1057 .get_gp_seq = rcu_no_completed, 1058 .sync = synchronize_rcu_trivial, 1059 .exp_sync = synchronize_rcu_trivial, 1060 .irq_capable = 1, 1061 .name = "trivial" 1062 }; 1063 1064 #ifdef CONFIG_TASKS_RCU 1065 1066 /* 1067 * Definitions for RCU-tasks torture testing. 1068 */ 1069 1070 static int tasks_torture_read_lock(void) 1071 { 1072 return 0; 1073 } 1074 1075 static void tasks_torture_read_unlock(int idx) 1076 { 1077 } 1078 1079 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 1080 { 1081 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 1082 } 1083 1084 static void synchronize_rcu_mult_test(void) 1085 { 1086 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry); 1087 } 1088 1089 static struct rcu_torture_ops tasks_ops = { 1090 .ttype = RCU_TASKS_FLAVOR, 1091 .init = rcu_sync_torture_init, 1092 .readlock = tasks_torture_read_lock, 1093 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 1094 .readunlock = tasks_torture_read_unlock, 1095 .get_gp_seq = rcu_no_completed, 1096 .deferred_free = rcu_tasks_torture_deferred_free, 1097 .sync = synchronize_rcu_tasks, 1098 .exp_sync = synchronize_rcu_mult_test, 1099 .call = call_rcu_tasks, 1100 .cb_barrier = rcu_barrier_tasks, 1101 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 1102 .get_gp_data = rcu_tasks_get_gp_data, 1103 .irq_capable = 1, 1104 .slow_gps = 1, 1105 .name = "tasks" 1106 }; 1107 1108 #define TASKS_OPS &tasks_ops, 1109 1110 #else // #ifdef CONFIG_TASKS_RCU 1111 1112 #define TASKS_OPS 1113 1114 #endif // #else #ifdef CONFIG_TASKS_RCU 1115 1116 1117 #ifdef CONFIG_TASKS_RUDE_RCU 1118 1119 /* 1120 * Definitions for rude RCU-tasks torture testing. 1121 */ 1122 1123 static struct rcu_torture_ops tasks_rude_ops = { 1124 .ttype = RCU_TASKS_RUDE_FLAVOR, 1125 .init = rcu_sync_torture_init, 1126 .readlock = rcu_torture_read_lock_trivial, 1127 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 1128 .readunlock = rcu_torture_read_unlock_trivial, 1129 .get_gp_seq = rcu_no_completed, 1130 .sync = synchronize_rcu_tasks_rude, 1131 .exp_sync = synchronize_rcu_tasks_rude, 1132 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 1133 .get_gp_data = rcu_tasks_rude_get_gp_data, 1134 .cbflood_max = 50000, 1135 .irq_capable = 1, 1136 .name = "tasks-rude" 1137 }; 1138 1139 #define TASKS_RUDE_OPS &tasks_rude_ops, 1140 1141 #else // #ifdef CONFIG_TASKS_RUDE_RCU 1142 1143 #define TASKS_RUDE_OPS 1144 1145 #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU 1146 1147 1148 #ifdef CONFIG_TASKS_TRACE_RCU 1149 1150 /* 1151 * Definitions for tracing RCU-tasks torture testing. 1152 */ 1153 1154 static int tasks_tracing_torture_read_lock(void) 1155 { 1156 rcu_read_lock_trace(); 1157 return 0; 1158 } 1159 1160 static void tasks_tracing_torture_read_unlock(int idx) 1161 { 1162 rcu_read_unlock_trace(); 1163 } 1164 1165 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 1166 { 1167 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 1168 } 1169 1170 static struct rcu_torture_ops tasks_tracing_ops = { 1171 .ttype = RCU_TASKS_TRACING_FLAVOR, 1172 .init = rcu_sync_torture_init, 1173 .readlock = tasks_tracing_torture_read_lock, 1174 .read_delay = srcu_read_delay, /* just reuse srcu's version. 
*/ 1175 .readunlock = tasks_tracing_torture_read_unlock, 1176 .readlock_held = rcu_read_lock_trace_held, 1177 .get_gp_seq = rcu_no_completed, 1178 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 1179 .sync = synchronize_rcu_tasks_trace, 1180 .exp_sync = synchronize_rcu_tasks_trace, 1181 .exp_current = rcu_tasks_trace_expedite_current, 1182 .call = call_rcu_tasks_trace, 1183 .cb_barrier = rcu_barrier_tasks_trace, 1184 .cbflood_max = 50000, 1185 .irq_capable = 1, 1186 .slow_gps = 1, 1187 .name = "tasks-tracing" 1188 }; 1189 1190 #define TASKS_TRACING_OPS &tasks_tracing_ops, 1191 1192 #else // #ifdef CONFIG_TASKS_TRACE_RCU 1193 1194 #define TASKS_TRACING_OPS 1195 1196 #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU 1197 1198 1199 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 1200 { 1201 if (!cur_ops->gp_diff) 1202 return new - old; 1203 return cur_ops->gp_diff(new, old); 1204 } 1205 1206 /* 1207 * RCU torture priority-boost testing. Runs one real-time thread per 1208 * CPU for moderate bursts, repeatedly starting grace periods and waiting 1209 * for them to complete. If a given grace period takes too long, we assume 1210 * that priority inversion has occurred. 1211 */ 1212 1213 static int old_rt_runtime = -1; 1214 1215 static void rcu_torture_disable_rt_throttle(void) 1216 { 1217 /* 1218 * Disable RT throttling so that rcutorture's boost threads don't get 1219 * throttled. Only possible if rcutorture is built-in otherwise the 1220 * user should manually do this by setting the sched_rt_period_us and 1221 * sched_rt_runtime sysctls. 1222 */ 1223 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 1224 return; 1225 1226 old_rt_runtime = sysctl_sched_rt_runtime; 1227 sysctl_sched_rt_runtime = -1; 1228 } 1229 1230 static void rcu_torture_enable_rt_throttle(void) 1231 { 1232 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 1233 return; 1234 1235 sysctl_sched_rt_runtime = old_rt_runtime; 1236 old_rt_runtime = -1; 1237 } 1238 1239 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) 1240 { 1241 int cpu; 1242 static int dbg_done; 1243 unsigned long end = jiffies; 1244 bool gp_done; 1245 unsigned long j; 1246 static unsigned long last_persist; 1247 unsigned long lp; 1248 unsigned long mininterval = test_boost_duration * HZ - HZ / 2; 1249 1250 if (end - *start > mininterval) { 1251 // Recheck after checking time to avoid false positives. 1252 smp_mb(); // Time check before grace-period check. 1253 if (cur_ops->poll_gp_state(gp_state)) 1254 return false; // passed, though perhaps just barely 1255 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { 1256 // At most one persisted message per boost test. 1257 j = jiffies; 1258 lp = READ_ONCE(last_persist); 1259 if (time_after(j, lp + mininterval) && 1260 cmpxchg(&last_persist, lp, j) == lp) { 1261 if (cpu < 0) 1262 pr_info("Boost inversion persisted: QS from all CPUs\n"); 1263 else 1264 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); 1265 } 1266 return false; // passed on a technicality 1267 } 1268 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 1269 n_rcu_torture_boost_failure++; 1270 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { 1271 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", 1272 current->rt_priority, gp_state, end - *start); 1273 cur_ops->gp_kthread_dbg(); 1274 // Recheck after print to flag grace period ending during splat. 
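// (If the grace period completed while the debug dump above was printing, the
// pr_info() below reports it as "ended already" rather than "still pending".)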
1275 gp_done = cur_ops->poll_gp_state(gp_state); 1276 pr_info("Boost inversion: GP %lu %s.\n", gp_state, 1277 gp_done ? "ended already" : "still pending"); 1278 1279 } 1280 1281 return true; // failed 1282 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { 1283 *start = jiffies; 1284 } 1285 1286 return false; // passed 1287 } 1288 1289 static int rcu_torture_boost(void *arg) 1290 { 1291 unsigned long endtime; 1292 unsigned long gp_state; 1293 unsigned long gp_state_time; 1294 unsigned long oldstarttime; 1295 unsigned long booststarttime = get_torture_init_jiffies() + test_boost_holdoff * HZ; 1296 1297 if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) { 1298 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 1299 } else { 1300 VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period"); 1301 while (time_before(jiffies, booststarttime)) { 1302 schedule_timeout_idle(HZ); 1303 if (kthread_should_stop()) 1304 goto cleanup; 1305 } 1306 VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period"); 1307 } 1308 1309 /* Set real-time priority. */ 1310 sched_set_fifo_low(current); 1311 1312 /* Each pass through the following loop does one boost-test cycle. */ 1313 do { 1314 bool failed = false; // Test failed already in this test interval 1315 bool gp_initiated = false; 1316 1317 if (kthread_should_stop()) 1318 goto checkwait; 1319 1320 /* Wait for the next test interval. */ 1321 oldstarttime = READ_ONCE(boost_starttime); 1322 while (time_before(jiffies, oldstarttime)) { 1323 schedule_timeout_interruptible(oldstarttime - jiffies); 1324 if (stutter_wait("rcu_torture_boost")) 1325 sched_set_fifo_low(current); 1326 if (torture_must_stop()) 1327 goto checkwait; 1328 } 1329 1330 // Do one boost-test interval. 1331 endtime = oldstarttime + test_boost_duration * HZ; 1332 while (time_before(jiffies, endtime)) { 1333 // Has current GP gone too long? 1334 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1335 failed = rcu_torture_boost_failed(gp_state, &gp_state_time); 1336 // If we don't have a grace period in flight, start one. 1337 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { 1338 gp_state = cur_ops->start_gp_poll(); 1339 gp_initiated = true; 1340 gp_state_time = jiffies; 1341 } 1342 if (stutter_wait("rcu_torture_boost")) { 1343 sched_set_fifo_low(current); 1344 // If the grace period already ended, 1345 // we don't know when that happened, so 1346 // start over. 1347 if (cur_ops->poll_gp_state(gp_state)) 1348 gp_initiated = false; 1349 } 1350 if (torture_must_stop()) 1351 goto checkwait; 1352 } 1353 1354 // In case the grace period extended beyond the end of the loop. 1355 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1356 rcu_torture_boost_failed(gp_state, &gp_state_time); 1357 1358 /* 1359 * Set the start time of the next test interval. 1360 * Yes, this is vulnerable to long delays, but such 1361 * delays simply cause a false negative for the next 1362 * interval. Besides, we are running at RT priority, 1363 * so delays should be relatively rare. 1364 */ 1365 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { 1366 if (mutex_trylock(&boost_mutex)) { 1367 if (oldstarttime == boost_starttime) { 1368 WRITE_ONCE(boost_starttime, 1369 jiffies + test_boost_interval * HZ); 1370 n_rcu_torture_boosts++; 1371 } 1372 mutex_unlock(&boost_mutex); 1373 break; 1374 } 1375 schedule_timeout_uninterruptible(HZ / 20); 1376 } 1377 1378 /* Go do the stutter. 
*/ 1379 checkwait: if (stutter_wait("rcu_torture_boost")) 1380 sched_set_fifo_low(current); 1381 } while (!torture_must_stop()); 1382 1383 cleanup: 1384 /* Clean up and exit. */ 1385 while (!kthread_should_stop()) { 1386 torture_shutdown_absorb("rcu_torture_boost"); 1387 schedule_timeout_uninterruptible(HZ / 20); 1388 } 1389 torture_kthread_stopping("rcu_torture_boost"); 1390 return 0; 1391 } 1392 1393 /* 1394 * RCU torture force-quiescent-state kthread. Repeatedly induces 1395 * bursts of calls to force_quiescent_state(), increasing the probability 1396 * of occurrence of some important types of race conditions. 1397 */ 1398 static int 1399 rcu_torture_fqs(void *arg) 1400 { 1401 unsigned long fqs_resume_time; 1402 int fqs_burst_remaining; 1403 int oldnice = task_nice(current); 1404 1405 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1406 do { 1407 fqs_resume_time = jiffies + fqs_stutter * HZ; 1408 while (time_before(jiffies, fqs_resume_time) && 1409 !kthread_should_stop()) { 1410 schedule_timeout_interruptible(HZ / 20); 1411 } 1412 fqs_burst_remaining = fqs_duration; 1413 while (fqs_burst_remaining > 0 && 1414 !kthread_should_stop()) { 1415 cur_ops->fqs(); 1416 udelay(fqs_holdoff); 1417 fqs_burst_remaining -= fqs_holdoff; 1418 } 1419 if (stutter_wait("rcu_torture_fqs")) 1420 sched_set_normal(current, oldnice); 1421 } while (!torture_must_stop()); 1422 torture_kthread_stopping("rcu_torture_fqs"); 1423 return 0; 1424 } 1425 1426 // Used by writers to randomly choose from the available grace-period primitives. 1427 static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; 1428 static int nsynctypes; 1429 1430 /* 1431 * Determine which grace-period primitives are available. 1432 */ 1433 static void rcu_torture_write_types(void) 1434 { 1435 bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full; 1436 bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp; 1437 bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll; 1438 bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync; 1439 1440 /* Initialize synctype[] array. If none set, take default. 
*/ 1441 if (!gp_cond1 && 1442 !gp_cond_exp1 && 1443 !gp_cond_full1 && 1444 !gp_cond_exp_full1 && 1445 !gp_exp1 && 1446 !gp_poll_exp1 && 1447 !gp_poll_exp_full1 && 1448 !gp_normal1 && 1449 !gp_poll1 && 1450 !gp_poll_full1 && 1451 !gp_sync1) { 1452 gp_cond1 = true; 1453 gp_cond_exp1 = true; 1454 gp_cond_full1 = true; 1455 gp_cond_exp_full1 = true; 1456 gp_exp1 = true; 1457 gp_poll_exp1 = true; 1458 gp_poll_exp_full1 = true; 1459 gp_normal1 = true; 1460 gp_poll1 = true; 1461 gp_poll_full1 = true; 1462 gp_sync1 = true; 1463 } 1464 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { 1465 synctype[nsynctypes++] = RTWS_COND_GET; 1466 pr_info("%s: Testing conditional GPs.\n", __func__); 1467 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { 1468 pr_alert("%s: gp_cond without primitives.\n", __func__); 1469 } 1470 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { 1471 synctype[nsynctypes++] = RTWS_COND_GET_EXP; 1472 pr_info("%s: Testing conditional expedited GPs.\n", __func__); 1473 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { 1474 pr_alert("%s: gp_cond_exp without primitives.\n", __func__); 1475 } 1476 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) { 1477 synctype[nsynctypes++] = RTWS_COND_GET_FULL; 1478 pr_info("%s: Testing conditional full-state GPs.\n", __func__); 1479 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) { 1480 pr_alert("%s: gp_cond_full without primitives.\n", __func__); 1481 } 1482 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) { 1483 synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL; 1484 pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__); 1485 } else if (gp_cond_exp_full && 1486 (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) { 1487 pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__); 1488 } 1489 if (gp_exp1 && cur_ops->exp_sync) { 1490 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1491 pr_info("%s: Testing expedited GPs.\n", __func__); 1492 } else if (gp_exp && !cur_ops->exp_sync) { 1493 pr_alert("%s: gp_exp without primitives.\n", __func__); 1494 } 1495 if (gp_normal1 && cur_ops->deferred_free) { 1496 synctype[nsynctypes++] = RTWS_DEF_FREE; 1497 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1498 } else if (gp_normal && !cur_ops->deferred_free) { 1499 pr_alert("%s: gp_normal without primitives.\n", __func__); 1500 } 1501 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state && 1502 cur_ops->start_gp_poll && cur_ops->poll_gp_state) { 1503 synctype[nsynctypes++] = RTWS_POLL_GET; 1504 pr_info("%s: Testing polling GPs.\n", __func__); 1505 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { 1506 pr_alert("%s: gp_poll without primitives.\n", __func__); 1507 } 1508 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full 1509 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) { 1510 synctype[nsynctypes++] = RTWS_POLL_GET_FULL; 1511 pr_info("%s: Testing polling full-state GPs.\n", __func__); 1512 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) { 1513 pr_alert("%s: gp_poll_full without primitives.\n", __func__); 1514 } 1515 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { 1516 synctype[nsynctypes++] = RTWS_POLL_GET_EXP; 1517 pr_info("%s: Testing polling expedited GPs.\n", __func__); 1518 } else if 
(gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { 1519 pr_alert("%s: gp_poll_exp without primitives.\n", __func__); 1520 } 1521 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) { 1522 synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL; 1523 pr_info("%s: Testing polling full-state expedited GPs.\n", __func__); 1524 } else if (gp_poll_exp_full && 1525 (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) { 1526 pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__); 1527 } 1528 if (gp_sync1 && cur_ops->sync) { 1529 synctype[nsynctypes++] = RTWS_SYNC; 1530 pr_info("%s: Testing normal GPs.\n", __func__); 1531 } else if (gp_sync && !cur_ops->sync) { 1532 pr_alert("%s: gp_sync without primitives.\n", __func__); 1533 } 1534 pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes); 1535 pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n", __func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp); 1536 } 1537 1538 /* 1539 * Do the specified rcu_torture_writer() synchronous grace period, 1540 * while also testing out the polled APIs. Note well that the single-CPU 1541 * grace-period optimizations must be accounted for. 1542 */ 1543 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) 1544 { 1545 unsigned long cookie; 1546 struct rcu_gp_oldstate cookie_full; 1547 bool dopoll; 1548 bool dopoll_full; 1549 unsigned long r = torture_random(trsp); 1550 1551 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); 1552 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); 1553 if (dopoll || dopoll_full) 1554 cpus_read_lock(); 1555 if (dopoll) 1556 cookie = cur_ops->get_gp_state(); 1557 if (dopoll_full) 1558 cur_ops->get_gp_state_full(&cookie_full); 1559 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) 1560 sync(); 1561 sync(); 1562 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), 1563 "%s: Cookie check 3 failed %pS() online %*pbl.", 1564 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1565 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), 1566 "%s: Cookie check 4 failed %pS() online %*pbl", 1567 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1568 if (dopoll || dopoll_full) 1569 cpus_read_unlock(); 1570 } 1571 1572 /* 1573 * RCU torture writer kthread. Repeatedly substitutes a new structure 1574 * for that pointed to by rcu_torture_current, freeing the old structure 1575 * after a series of grace periods (the "pipeline"). 1576 */ 1577 static int 1578 rcu_torture_writer(void *arg) 1579 { 1580 bool booting_still = false; 1581 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1582 unsigned long cookie; 1583 struct rcu_gp_oldstate cookie_full; 1584 int expediting = 0; 1585 unsigned long gp_snap; 1586 unsigned long gp_snap1; 1587 struct rcu_gp_oldstate gp_snap_full; 1588 struct rcu_gp_oldstate gp_snap1_full; 1589 int i; 1590 int idx; 1591 unsigned long j; 1592 int oldnice = task_nice(current); 1593 struct rcu_gp_oldstate *rgo = NULL; 1594 int rgo_size = 0; 1595 struct rcu_torture *rp; 1596 struct rcu_torture *old_rp; 1597 static DEFINE_TORTURE_RANDOM(rand); 1598 unsigned long stallsdone = jiffies; 1599 bool stutter_waited; 1600 unsigned long *ulo = NULL; 1601 int ulo_size = 0; 1602 1603 // If a new stall test is added, this must be adjusted. 
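// The stallsdone estimate below sums the stall-related holdoff and duration
// parameters, adds a 60-second margin, and scales by the number of stall
// passes (stall_cpu_repeat + 1).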
1604 if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu) 1605 stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * 1606 HZ * (stall_cpu_repeat + 1); 1607 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1608 if (!can_expedite) 1609 pr_alert("%s" TORTURE_FLAG 1610 " GP expediting controlled from boot/sysfs for %s.\n", 1611 torture_type, cur_ops->name); 1612 if (WARN_ONCE(nsynctypes == 0, 1613 "%s: No update-side primitives.\n", __func__)) { 1614 /* 1615 * No updates primitives, so don't try updating. 1616 * The resulting test won't be testing much, hence the 1617 * above WARN_ONCE(). 1618 */ 1619 rcu_torture_writer_state = RTWS_STOPPING; 1620 torture_kthread_stopping("rcu_torture_writer"); 1621 return 0; 1622 } 1623 if (cur_ops->poll_active > 0) { 1624 ulo = kcalloc(cur_ops->poll_active, sizeof(*ulo), GFP_KERNEL); 1625 if (!WARN_ON(!ulo)) 1626 ulo_size = cur_ops->poll_active; 1627 } 1628 if (cur_ops->poll_active_full > 0) { 1629 rgo = kcalloc(cur_ops->poll_active_full, sizeof(*rgo), GFP_KERNEL); 1630 if (!WARN_ON(!rgo)) 1631 rgo_size = cur_ops->poll_active_full; 1632 } 1633 1634 // If the system is still booting, let it finish. 1635 j = jiffies; 1636 while (!torture_must_stop() && !rcu_inkernel_boot_has_ended()) { 1637 booting_still = true; 1638 schedule_timeout_interruptible(HZ); 1639 } 1640 if (booting_still) 1641 pr_alert("%s" TORTURE_FLAG " Waited %lu jiffies for boot to complete.\n", 1642 torture_type, jiffies - j); 1643 1644 do { 1645 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1646 torture_hrtimeout_us(500, 1000, &rand); 1647 rp = rcu_torture_alloc(); 1648 if (rp == NULL) 1649 continue; 1650 rp->rtort_pipe_count = 0; 1651 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); 1652 rcu_torture_writer_state = RTWS_DELAY; 1653 udelay(torture_random(&rand) & 0x3ff); 1654 rcu_torture_writer_state = RTWS_REPLACE; 1655 old_rp = rcu_dereference_check(rcu_torture_current, 1656 current == writer_task); 1657 rp->rtort_mbtest = 1; 1658 rcu_assign_pointer(rcu_torture_current, rp); 1659 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1660 if (old_rp) { 1661 i = old_rp->rtort_pipe_count; 1662 if (i > RCU_TORTURE_PIPE_LEN) 1663 i = RCU_TORTURE_PIPE_LEN; 1664 atomic_inc(&rcu_torture_wcount[i]); 1665 WRITE_ONCE(old_rp->rtort_pipe_count, 1666 old_rp->rtort_pipe_count + 1); 1667 ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count); 1668 1669 // Make sure readers block polled grace periods. 
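// That is, a cookie obtained from get_gp_state() while this kthread holds
// ->readlock() must not yet report a completed grace period ("Cookie check 1"),
// while a get_comp_state() cookie, which represents an already-completed
// grace period, must always poll as completed.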
1670 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1671 idx = cur_ops->readlock(); 1672 cookie = cur_ops->get_gp_state(); 1673 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1674 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1675 __func__, 1676 rcu_torture_writer_state_getname(), 1677 rcu_torture_writer_state, 1678 cookie, cur_ops->get_gp_state()); 1679 if (cur_ops->get_comp_state) { 1680 cookie = cur_ops->get_comp_state(); 1681 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1682 } 1683 cur_ops->readunlock(idx); 1684 } 1685 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1686 idx = cur_ops->readlock(); 1687 cur_ops->get_gp_state_full(&cookie_full); 1688 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1689 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1690 __func__, 1691 rcu_torture_writer_state_getname(), 1692 rcu_torture_writer_state, 1693 cpumask_pr_args(cpu_online_mask)); 1694 if (cur_ops->get_comp_state_full) { 1695 cur_ops->get_comp_state_full(&cookie_full); 1696 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1697 } 1698 cur_ops->readunlock(idx); 1699 } 1700 switch (synctype[torture_random(&rand) % nsynctypes]) { 1701 case RTWS_DEF_FREE: 1702 rcu_torture_writer_state = RTWS_DEF_FREE; 1703 cur_ops->deferred_free(old_rp); 1704 break; 1705 case RTWS_EXP_SYNC: 1706 rcu_torture_writer_state = RTWS_EXP_SYNC; 1707 do_rtws_sync(&rand, cur_ops->exp_sync); 1708 rcu_torture_pipe_update(old_rp); 1709 break; 1710 case RTWS_COND_GET: 1711 rcu_torture_writer_state = RTWS_COND_GET; 1712 gp_snap = cur_ops->get_gp_state(); 1713 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi, 1714 1000, &rand); 1715 rcu_torture_writer_state = RTWS_COND_SYNC; 1716 cur_ops->cond_sync(gp_snap); 1717 rcu_torture_pipe_update(old_rp); 1718 break; 1719 case RTWS_COND_GET_EXP: 1720 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1721 gp_snap = cur_ops->get_gp_state_exp(); 1722 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp, 1723 1000, &rand); 1724 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1725 cur_ops->cond_sync_exp(gp_snap); 1726 rcu_torture_pipe_update(old_rp); 1727 break; 1728 case RTWS_COND_GET_FULL: 1729 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1730 cur_ops->get_gp_state_full(&gp_snap_full); 1731 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi, 1732 1000, &rand); 1733 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1734 cur_ops->cond_sync_full(&gp_snap_full); 1735 rcu_torture_pipe_update(old_rp); 1736 break; 1737 case RTWS_COND_GET_EXP_FULL: 1738 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1739 cur_ops->get_gp_state_full(&gp_snap_full); 1740 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp, 1741 1000, &rand); 1742 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1743 cur_ops->cond_sync_exp_full(&gp_snap_full); 1744 rcu_torture_pipe_update(old_rp); 1745 break; 1746 case RTWS_POLL_GET: 1747 rcu_torture_writer_state = RTWS_POLL_GET; 1748 for (i = 0; i < ulo_size; i++) 1749 ulo[i] = cur_ops->get_comp_state(); 1750 gp_snap = cur_ops->start_gp_poll(); 1751 rcu_torture_writer_state = RTWS_POLL_WAIT; 1752 if (cur_ops->exp_current && !(torture_random(&rand) & 0xff)) 1753 cur_ops->exp_current(); 1754 while (!cur_ops->poll_gp_state(gp_snap)) { 1755 gp_snap1 = cur_ops->get_gp_state(); 1756 for (i = 0; i < ulo_size; i++) 1757 if (cur_ops->poll_gp_state(ulo[i]) || 1758 cur_ops->same_gp_state(ulo[i], gp_snap1)) { 1759 ulo[i] = gp_snap1; 1760 break; 1761 } 1762 WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size); 1763 
torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1764 1000, &rand); 1765 } 1766 rcu_torture_pipe_update(old_rp); 1767 break; 1768 case RTWS_POLL_GET_FULL: 1769 rcu_torture_writer_state = RTWS_POLL_GET_FULL; 1770 for (i = 0; i < rgo_size; i++) 1771 cur_ops->get_comp_state_full(&rgo[i]); 1772 cur_ops->start_gp_poll_full(&gp_snap_full); 1773 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1774 if (cur_ops->exp_current && !(torture_random(&rand) & 0xff)) 1775 cur_ops->exp_current(); 1776 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1777 cur_ops->get_gp_state_full(&gp_snap1_full); 1778 for (i = 0; i < rgo_size; i++) 1779 if (cur_ops->poll_gp_state_full(&rgo[i]) || 1780 cur_ops->same_gp_state_full(&rgo[i], 1781 &gp_snap1_full)) { 1782 rgo[i] = gp_snap1_full; 1783 break; 1784 } 1785 WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size); 1786 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1787 1000, &rand); 1788 } 1789 rcu_torture_pipe_update(old_rp); 1790 break; 1791 case RTWS_POLL_GET_EXP: 1792 rcu_torture_writer_state = RTWS_POLL_GET_EXP; 1793 gp_snap = cur_ops->start_gp_poll_exp(); 1794 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; 1795 while (!cur_ops->poll_gp_state_exp(gp_snap)) 1796 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1797 1000, &rand); 1798 rcu_torture_pipe_update(old_rp); 1799 break; 1800 case RTWS_POLL_GET_EXP_FULL: 1801 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; 1802 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1803 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; 1804 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1805 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1806 1000, &rand); 1807 rcu_torture_pipe_update(old_rp); 1808 break; 1809 case RTWS_SYNC: 1810 rcu_torture_writer_state = RTWS_SYNC; 1811 do_rtws_sync(&rand, cur_ops->sync); 1812 rcu_torture_pipe_update(old_rp); 1813 break; 1814 default: 1815 WARN_ON_ONCE(1); 1816 break; 1817 } 1818 } 1819 WRITE_ONCE(rcu_torture_current_version, 1820 rcu_torture_current_version + 1); 1821 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1822 if (can_expedite && 1823 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1824 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1825 if (expediting >= 0) 1826 rcu_expedite_gp(); 1827 else 1828 rcu_unexpedite_gp(); 1829 if (++expediting > 3) 1830 expediting = -expediting; 1831 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1832 can_expedite = !rcu_gp_is_expedited() && 1833 !rcu_gp_is_normal(); 1834 } 1835 rcu_torture_writer_state = RTWS_STUTTER; 1836 stutter_waited = stutter_wait("rcu_torture_writer"); 1837 if (stutter_waited && 1838 !atomic_read(&rcu_fwd_cb_nodelay) && 1839 !cur_ops->slow_gps && 1840 !torture_must_stop() && 1841 time_after(jiffies, stallsdone)) 1842 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1843 if (list_empty(&rcu_tortures[i].rtort_free) && 1844 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { 1845 tracing_off(); 1846 if (cur_ops->gp_kthread_dbg) 1847 cur_ops->gp_kthread_dbg(); 1848 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1849 rcu_ftrace_dump(DUMP_ALL); 1850 break; 1851 } 1852 if (stutter_waited) 1853 sched_set_normal(current, oldnice); 1854 } while (!torture_must_stop()); 1855 rcu_torture_current = NULL; // Let stats task know that we are done. 1856 /* Reset expediting back to unexpedited. 
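   Unwind whatever rcu_expedite_gp() nesting this kthread still has
   outstanding before it exits.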
*/ 1857 if (expediting > 0) 1858 expediting = -expediting; 1859 while (can_expedite && expediting++ < 0) 1860 rcu_unexpedite_gp(); 1861 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1862 if (!can_expedite) 1863 pr_alert("%s" TORTURE_FLAG 1864 " Dynamic grace-period expediting was disabled.\n", 1865 torture_type); 1866 kfree(ulo); 1867 kfree(rgo); 1868 rcu_torture_writer_state = RTWS_STOPPING; 1869 torture_kthread_stopping("rcu_torture_writer"); 1870 return 0; 1871 } 1872 1873 /* 1874 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1875 * delay between calls. 1876 */ 1877 static int 1878 rcu_torture_fakewriter(void *arg) 1879 { 1880 unsigned long gp_snap; 1881 struct rcu_gp_oldstate gp_snap_full; 1882 DEFINE_TORTURE_RANDOM(rand); 1883 1884 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1885 set_user_nice(current, MAX_NICE); 1886 1887 if (WARN_ONCE(nsynctypes == 0, 1888 "%s: No update-side primitives.\n", __func__)) { 1889 /* 1890 * No updates primitives, so don't try updating. 1891 * The resulting test won't be testing much, hence the 1892 * above WARN_ONCE(). 1893 */ 1894 torture_kthread_stopping("rcu_torture_fakewriter"); 1895 return 0; 1896 } 1897 1898 do { 1899 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1900 if (cur_ops->cb_barrier != NULL && 1901 torture_random(&rand) % (nrealfakewriters * 8) == 0) { 1902 cur_ops->cb_barrier(); 1903 } else { 1904 switch (synctype[torture_random(&rand) % nsynctypes]) { 1905 case RTWS_DEF_FREE: 1906 break; 1907 case RTWS_EXP_SYNC: 1908 cur_ops->exp_sync(); 1909 break; 1910 case RTWS_COND_GET: 1911 gp_snap = cur_ops->get_gp_state(); 1912 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1913 cur_ops->cond_sync(gp_snap); 1914 break; 1915 case RTWS_COND_GET_EXP: 1916 gp_snap = cur_ops->get_gp_state_exp(); 1917 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1918 cur_ops->cond_sync_exp(gp_snap); 1919 break; 1920 case RTWS_COND_GET_FULL: 1921 cur_ops->get_gp_state_full(&gp_snap_full); 1922 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1923 cur_ops->cond_sync_full(&gp_snap_full); 1924 break; 1925 case RTWS_COND_GET_EXP_FULL: 1926 cur_ops->get_gp_state_full(&gp_snap_full); 1927 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1928 cur_ops->cond_sync_exp_full(&gp_snap_full); 1929 break; 1930 case RTWS_POLL_GET: 1931 if (cur_ops->start_poll_irqsoff) 1932 local_irq_disable(); 1933 gp_snap = cur_ops->start_gp_poll(); 1934 if (cur_ops->start_poll_irqsoff) 1935 local_irq_enable(); 1936 while (!cur_ops->poll_gp_state(gp_snap)) { 1937 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1938 &rand); 1939 } 1940 break; 1941 case RTWS_POLL_GET_FULL: 1942 if (cur_ops->start_poll_irqsoff) 1943 local_irq_disable(); 1944 cur_ops->start_gp_poll_full(&gp_snap_full); 1945 if (cur_ops->start_poll_irqsoff) 1946 local_irq_enable(); 1947 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1948 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1949 &rand); 1950 } 1951 break; 1952 case RTWS_POLL_GET_EXP: 1953 gp_snap = cur_ops->start_gp_poll_exp(); 1954 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1955 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1956 &rand); 1957 } 1958 break; 1959 case RTWS_POLL_GET_EXP_FULL: 1960 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1961 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1962 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1963 &rand); 1964 } 1965 break; 1966 case RTWS_SYNC: 1967 
cur_ops->sync(); 1968 break; 1969 default: 1970 WARN_ON_ONCE(1); 1971 break; 1972 } 1973 } 1974 stutter_wait("rcu_torture_fakewriter"); 1975 } while (!torture_must_stop()); 1976 1977 torture_kthread_stopping("rcu_torture_fakewriter"); 1978 return 0; 1979 } 1980 1981 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1982 { 1983 kfree(rhp); 1984 } 1985 1986 // Set up and carry out testing of RCU's global memory ordering 1987 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1988 struct torture_random_state *trsp) 1989 { 1990 unsigned long loops; 1991 int noc = torture_num_online_cpus(); 1992 int rdrchked; 1993 int rdrchker; 1994 struct rcu_torture_reader_check *rtrcp; // Me. 1995 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1996 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1997 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1998 1999 if (myid < 0) 2000 return; // Don't try this from timer handlers. 2001 2002 // Increment my counter. 2003 rtrcp = &rcu_torture_reader_mbchk[myid]; 2004 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 2005 2006 // Attempt to assign someone else some checking work. 2007 rdrchked = torture_random(trsp) % nrealreaders; 2008 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 2009 rdrchker = torture_random(trsp) % nrealreaders; 2010 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 2011 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 2012 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 2013 !READ_ONCE(rtp->rtort_chkp) && 2014 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 2015 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 2016 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 2017 rtrcp->rtc_chkrdr = rdrchked; 2018 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 2019 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 2020 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 2021 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 2022 } 2023 2024 // If assigned some completed work, do it! 2025 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 2026 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 2027 return; // No work or work not yet ready. 2028 rdrchked = rtrcp_assigner->rtc_chkrdr; 2029 if (WARN_ON_ONCE(rdrchked < 0)) 2030 return; 2031 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 2032 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 2033 atomic_inc(&n_rcu_torture_mbchk_tries); 2034 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 2035 atomic_inc(&n_rcu_torture_mbchk_fail); 2036 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 2037 rtrcp_assigner->rtc_ready = 0; 2038 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 2039 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 2040 } 2041 2042 // Verify the specified RCUTORTURE_RDR* state. 
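// The reader state is a bitmask of RCUTORTURE_RDR_* flags, with the return
// values of any ->readlock() calls stored in the byte ranges selected by
// RCUTORTURE_RDR_MASK_1 and RCUTORTURE_RDR_MASK_2. As a purely illustrative
// example, an extended reader holding rcu_read_lock_bh() plus an outer
// ->readlock() that returned index 1 would carry the state
// RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RCU_1 | (1 << RCUTORTURE_RDR_SHIFT_1),
// that is, 0x121.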
2043 #define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count() 2044 static void rcutorture_one_extend_check(char *s, int curstate, int new, int old) 2045 { 2046 int mask; 2047 2048 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE) || in_nmi()) 2049 return; 2050 2051 WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled() && !in_hardirq(), ROEC_ARGS); 2052 WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS); 2053 2054 // If CONFIG_PREEMPT_COUNT=n, further checks are unreliable. 2055 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 2056 return; 2057 2058 WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 2059 !softirq_count(), ROEC_ARGS); 2060 WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) && 2061 !(preempt_count() & PREEMPT_MASK), ROEC_ARGS); 2062 WARN_ONCE(cur_ops->readlock_nesting && 2063 (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) && 2064 cur_ops->readlock_nesting() == 0, ROEC_ARGS); 2065 2066 // Interrupt handlers have all sorts of stuff disabled, so ignore 2067 // unintended disabling. 2068 if (in_serving_softirq() || in_hardirq()) 2069 return; 2070 2071 WARN_ONCE(cur_ops->extendables && 2072 !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 2073 softirq_count(), ROEC_ARGS); 2074 2075 /* 2076 * non-preemptible RCU in a preemptible kernel uses preempt_disable() 2077 * as rcu_read_lock(). 2078 */ 2079 mask = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2080 if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) 2081 mask |= RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2082 2083 WARN_ONCE(cur_ops->extendables && !(curstate & mask) && 2084 (preempt_count() & PREEMPT_MASK), ROEC_ARGS); 2085 2086 /* 2087 * non-preemptible RCU in a preemptible kernel uses "preempt_count() & 2088 * PREEMPT_MASK" as ->readlock_nesting(). 2089 */ 2090 mask = RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2091 if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) 2092 mask |= RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2093 2094 if (IS_ENABLED(CONFIG_PREEMPT_RT) && softirq_count()) 2095 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 2096 2097 WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) && 2098 cur_ops->readlock_nesting() > 0, ROEC_ARGS); 2099 } 2100 2101 /* 2102 * Do one extension of an RCU read-side critical section using the 2103 * current reader state in readstate (set to zero for initial entry 2104 * to extended critical section), set the new state as specified by 2105 * newstate (set to zero for final exit from extended critical section), 2106 * and random-number-generator state in trsp. If this is neither the 2107 * beginning nor the end of the critical section and if there was actually a 2108 * change, do a ->read_delay(). 2109 */ 2110 static void rcutorture_one_extend(int *readstate, int newstate, struct torture_random_state *trsp, 2111 struct rt_read_seg *rtrsp) 2112 { 2113 bool first; 2114 unsigned long flags; 2115 int idxnew1 = -1; 2116 int idxnew2 = -1; 2117 int idxold1 = *readstate; 2118 int idxold2 = idxold1; 2119 int statesnew = ~*readstate & newstate; 2120 int statesold = *readstate & ~newstate; 2121 2122 first = idxold1 == 0; 2123 WARN_ON_ONCE(idxold2 < 0); 2124 WARN_ON_ONCE(idxold2 & ~(RCUTORTURE_RDR_ALLBITS | RCUTORTURE_RDR_UPDOWN)); 2125 rcutorture_one_extend_check("before change", idxold1, statesnew, statesold); 2126 rtrsp->rt_readstate = newstate; 2127 2128 /* First, put new protection in place to avoid critical-section gap.
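   New protections are therefore acquired here before any old ones are
   dropped below, so the reader is never momentarily unprotected while
   transitioning between states.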
*/ 2129 if (statesnew & RCUTORTURE_RDR_BH) 2130 local_bh_disable(); 2131 if (statesnew & RCUTORTURE_RDR_RBH) 2132 rcu_read_lock_bh(); 2133 if (statesnew & RCUTORTURE_RDR_IRQ) 2134 local_irq_disable(); 2135 if (statesnew & RCUTORTURE_RDR_PREEMPT) 2136 preempt_disable(); 2137 if (statesnew & RCUTORTURE_RDR_SCHED) 2138 rcu_read_lock_sched(); 2139 if (statesnew & RCUTORTURE_RDR_RCU_1) 2140 idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; 2141 if (statesnew & RCUTORTURE_RDR_RCU_2) 2142 idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2; 2143 2144 // Complain unless both the old and the new protection are in place. 2145 rcutorture_one_extend_check("during change", idxold1 | statesnew, statesnew, statesold); 2146 2147 // Sample CPU under both sets of protections to reduce confusion. 2148 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 2149 int cpu = raw_smp_processor_id(); 2150 rtrsp->rt_cpu = cpu; 2151 if (!first) { 2152 rtrsp[-1].rt_end_cpu = cpu; 2153 if (cur_ops->reader_blocked) 2154 rtrsp[-1].rt_preempted = cur_ops->reader_blocked(); 2155 } 2156 } 2157 // Sample grace-period sequence number, as good a place as any. 2158 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && cur_ops->gather_gp_seqs) { 2159 rtrsp->rt_gp_seq = cur_ops->gather_gp_seqs(); 2160 rtrsp->rt_ts = ktime_get_mono_fast_ns(); 2161 if (!first) 2162 rtrsp[-1].rt_gp_seq_end = rtrsp->rt_gp_seq; 2163 } 2164 2165 /* 2166 * Next, remove old protection, in decreasing order of strength 2167 * to avoid unlock paths that aren't safe in the stronger 2168 * context. Namely: BH cannot be enabled with interrupts disabled. 2169 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible 2170 * context. 2171 */ 2172 if (statesold & RCUTORTURE_RDR_IRQ) 2173 local_irq_enable(); 2174 if (statesold & RCUTORTURE_RDR_PREEMPT) 2175 preempt_enable(); 2176 if (statesold & RCUTORTURE_RDR_SCHED) 2177 rcu_read_unlock_sched(); 2178 if (statesold & RCUTORTURE_RDR_BH) 2179 local_bh_enable(); 2180 if (statesold & RCUTORTURE_RDR_RBH) 2181 rcu_read_unlock_bh(); 2182 if (statesold & RCUTORTURE_RDR_RCU_2) { 2183 cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2); 2184 WARN_ON_ONCE(idxnew2 != -1); 2185 idxold2 = 0; 2186 } 2187 if (statesold & RCUTORTURE_RDR_RCU_1) { 2188 bool lockit; 2189 2190 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 2191 if (lockit) 2192 raw_spin_lock_irqsave(&current->pi_lock, flags); 2193 cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); 2194 WARN_ON_ONCE(idxnew1 != -1); 2195 idxold1 = 0; 2196 if (lockit) 2197 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 2198 } 2199 if (statesold & RCUTORTURE_RDR_UPDOWN) { 2200 cur_ops->up_read((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); 2201 WARN_ON_ONCE(idxnew1 != -1); 2202 idxold1 = 0; 2203 } 2204 2205 /* Delay if neither beginning nor end and there was a change. */ 2206 if ((statesnew || statesold) && *readstate && newstate) 2207 cur_ops->read_delay(trsp, rtrsp); 2208 2209 /* Update the reader state.
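   A -1 in idxnew1/idxnew2 means the corresponding RCU reader was not
   re-entered during this extension, so the previously recorded index
   bits (zeroed above if that reader was exited) are carried forward.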
*/ 2210 if (idxnew1 == -1) 2211 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 2212 WARN_ON_ONCE(idxnew1 < 0); 2213 if (idxnew2 == -1) 2214 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 2215 WARN_ON_ONCE(idxnew2 < 0); 2216 *readstate = idxnew1 | idxnew2 | newstate; 2217 WARN_ON_ONCE(*readstate < 0); 2218 if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS)) 2219 pr_info("Unexpected readstate value of %#x\n", *readstate); 2220 rcutorture_one_extend_check("after change", *readstate, statesnew, statesold); 2221 } 2222 2223 /* Return the biggest extendables mask given current RCU and boot parameters. */ 2224 static int rcutorture_extend_mask_max(void) 2225 { 2226 int mask; 2227 2228 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 2229 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 2230 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2231 return mask; 2232 } 2233 2234 /* Return a random protection state mask, but with at least one bit set. */ 2235 static int 2236 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 2237 { 2238 int mask = rcutorture_extend_mask_max(); 2239 unsigned long randmask1 = torture_random(trsp); 2240 unsigned long randmask2 = randmask1 >> 3; 2241 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2242 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 2243 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 2244 2245 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits. 2246 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 2247 if (!(randmask1 & 0x7)) 2248 mask = mask & randmask2; 2249 else 2250 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 2251 2252 // Can't have nested RCU reader without outer RCU reader. 2253 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 2254 if (oldmask & RCUTORTURE_RDR_RCU_1) 2255 mask &= ~RCUTORTURE_RDR_RCU_2; 2256 else 2257 mask |= RCUTORTURE_RDR_RCU_1; 2258 } 2259 2260 /* 2261 * Can't enable bh w/irq disabled. 2262 */ 2263 if (mask & RCUTORTURE_RDR_IRQ) 2264 mask |= oldmask & bhs; 2265 2266 /* 2267 * Ideally these sequences would be detected in debug builds 2268 * (regardless of RT), but until then don't stop testing 2269 * them on non-RT. 2270 */ 2271 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 2272 /* Can't modify BH in atomic context */ 2273 if (oldmask & preempts_irq) 2274 mask &= ~bhs; 2275 if ((oldmask | mask) & preempts_irq) 2276 mask |= oldmask & bhs; 2277 } 2278 2279 return mask ?: RCUTORTURE_RDR_RCU_1; 2280 } 2281 2282 /* 2283 * Do a randomly selected number of extensions of an existing RCU read-side 2284 * critical section. 2285 */ 2286 static struct rt_read_seg * 2287 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, struct rt_read_seg *rtrsp) 2288 { 2289 int i; 2290 int j; 2291 int mask = rcutorture_extend_mask_max(); 2292 2293 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 2294 if (!((mask - 1) & mask)) 2295 return rtrsp; /* Current RCU reader not extendable. */ 2296 /* Bias towards larger numbers of loops. 
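   ORing the random value with a right-shifted copy of itself before
   masking with RCUTORTURE_RDR_MAX_LOOPS skews the result high.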
*/ 2297 i = torture_random(trsp); 2298 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 2299 for (j = 0; j < i; j++) { 2300 mask = rcutorture_extend_mask(*readstate, trsp); 2301 WARN_ON_ONCE(mask & RCUTORTURE_RDR_UPDOWN); 2302 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 2303 } 2304 return &rtrsp[j]; 2305 } 2306 2307 struct rcu_torture_one_read_state { 2308 bool checkpolling; 2309 unsigned long cookie; 2310 struct rcu_gp_oldstate cookie_full; 2311 unsigned long started; 2312 struct rcu_torture *p; 2313 int readstate; 2314 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS]; 2315 struct rt_read_seg *rtrsp; 2316 unsigned long long ts; 2317 }; 2318 2319 static void init_rcu_torture_one_read_state(struct rcu_torture_one_read_state *rtorsp, 2320 struct torture_random_state *trsp) 2321 { 2322 memset(rtorsp, 0, sizeof(*rtorsp)); 2323 rtorsp->checkpolling = !(torture_random(trsp) & 0xfff); 2324 rtorsp->rtrsp = &rtorsp->rtseg[0]; 2325 } 2326 2327 /* 2328 * Set up the first segment of a series of overlapping read-side 2329 * critical sections. The caller must have actually initiated the 2330 * outermost read-side critical section. 2331 */ 2332 static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp, 2333 struct torture_random_state *trsp, long myid) 2334 { 2335 if (rtorsp->checkpolling) { 2336 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2337 rtorsp->cookie = cur_ops->get_gp_state(); 2338 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2339 cur_ops->get_gp_state_full(&rtorsp->cookie_full); 2340 } 2341 rtorsp->started = cur_ops->get_gp_seq(); 2342 rtorsp->ts = rcu_trace_clock_local(); 2343 rtorsp->p = rcu_dereference_check(rcu_torture_current, 2344 !cur_ops->readlock_held || cur_ops->readlock_held() || 2345 (rtorsp->readstate & RCUTORTURE_RDR_UPDOWN)); 2346 if (rtorsp->p == NULL) { 2347 /* Wait for rcu_torture_writer to get underway */ 2348 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp); 2349 return false; 2350 } 2351 if (rtorsp->p->rtort_mbtest == 0) 2352 atomic_inc(&n_rcu_torture_mberror); 2353 rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp); 2354 return true; 2355 } 2356 2357 /* 2358 * Complete the last segment of a series of overlapping read-side 2359 * critical sections and check for errors. 2360 */ 2361 static void rcu_torture_one_read_end(struct rcu_torture_one_read_state *rtorsp, 2362 struct torture_random_state *trsp) 2363 { 2364 int i; 2365 unsigned long completed; 2366 int pipe_count; 2367 bool preempted = false; 2368 struct rt_read_seg *rtrsp1; 2369 2370 preempt_disable(); 2371 pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count); 2372 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2373 // Should not happen in a correct RCU implementation, 2374 // happens quite often for torture_type=busted. 2375 pipe_count = RCU_TORTURE_PIPE_LEN; 2376 } 2377 completed = cur_ops->get_gp_seq(); 2378 if (pipe_count > 1) { 2379 do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu, 2380 rtorsp->ts, rtorsp->started, completed); 2381 rcu_ftrace_dump(DUMP_ALL); 2382 } 2383 __this_cpu_inc(rcu_torture_count[pipe_count]); 2384 completed = rcutorture_seq_diff(completed, rtorsp->started); 2385 if (completed > RCU_TORTURE_PIPE_LEN) { 2386 /* Should not happen, but... 
*/ 2387 completed = RCU_TORTURE_PIPE_LEN; 2388 } 2389 __this_cpu_inc(rcu_torture_batch[completed]); 2390 preempt_enable(); 2391 if (rtorsp->checkpolling) { 2392 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2393 WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie), 2394 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2395 __func__, 2396 rcu_torture_writer_state_getname(), 2397 rcu_torture_writer_state, 2398 rtorsp->cookie, cur_ops->get_gp_state()); 2399 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2400 WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full), 2401 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2402 __func__, 2403 rcu_torture_writer_state_getname(), 2404 rcu_torture_writer_state, 2405 cpumask_pr_args(cpu_online_mask)); 2406 } 2407 if (cur_ops->reader_blocked) 2408 preempted = cur_ops->reader_blocked(); 2409 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp); 2410 WARN_ON_ONCE(rtorsp->readstate); 2411 // This next splat is expected behavior if leakpointer, especially 2412 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2413 WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1); 2414 2415 /* If error or close call, record the sequence of reader protections. */ 2416 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2417 i = 0; 2418 for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++) 2419 err_segs[i++] = *rtrsp1; 2420 rt_read_nsegs = i; 2421 rt_read_preempted = preempted; 2422 } 2423 } 2424 2425 /* 2426 * Do one read-side critical section, returning false if there was 2427 * no data to read. Can be invoked both from process context and 2428 * from a timer handler. 2429 */ 2430 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 2431 { 2432 int newstate; 2433 struct rcu_torture_one_read_state rtors; 2434 2435 WARN_ON_ONCE(!rcu_is_watching()); 2436 init_rcu_torture_one_read_state(&rtors, trsp); 2437 newstate = rcutorture_extend_mask(rtors.readstate, trsp); 2438 WARN_ON_ONCE(newstate & RCUTORTURE_RDR_UPDOWN); 2439 rcutorture_one_extend(&rtors.readstate, newstate, trsp, rtors.rtrsp++); 2440 if (!rcu_torture_one_read_start(&rtors, trsp, myid)) 2441 return false; 2442 rtors.rtrsp = rcutorture_loop_extend(&rtors.readstate, trsp, rtors.rtrsp); 2443 rcu_torture_one_read_end(&rtors, trsp); 2444 return true; 2445 } 2446 2447 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2448 2449 /* 2450 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2451 * incrementing the corresponding element of the pipeline array. The 2452 * counter in the element should never be greater than 1, otherwise, the 2453 * RCU implementation is broken. 2454 */ 2455 static void rcu_torture_timer(struct timer_list *unused) 2456 { 2457 WARN_ON_ONCE(!in_serving_softirq()); 2458 WARN_ON_ONCE(in_hardirq()); 2459 WARN_ON_ONCE(in_nmi()); 2460 atomic_long_inc(&n_rcu_torture_timers); 2461 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2462 2463 /* Test call_rcu() invocation from interrupt handler. */ 2464 if (cur_ops->call) { 2465 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2466 2467 if (rhp) 2468 cur_ops->call(rhp, rcu_torture_timer_cb); 2469 } 2470 } 2471 2472 /* 2473 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2474 * incrementing the corresponding element of the pipeline array. The 2475 * counter in the element should never be greater than 1, otherwise, the 2476 * RCU implementation is broken. 
2477 */ 2478 static int 2479 rcu_torture_reader(void *arg) 2480 { 2481 unsigned long lastsleep = jiffies; 2482 long myid = (long)arg; 2483 int mynumonline = myid; 2484 DEFINE_TORTURE_RANDOM(rand); 2485 struct timer_list t; 2486 2487 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2488 set_user_nice(current, MAX_NICE); 2489 if (irqreader && cur_ops->irq_capable) 2490 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2491 tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. 2492 do { 2493 if (irqreader && cur_ops->irq_capable) { 2494 if (!timer_pending(&t)) 2495 mod_timer(&t, jiffies + 1); 2496 } 2497 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2498 schedule_timeout_interruptible(HZ); 2499 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2500 torture_hrtimeout_us(500, 1000, &rand); 2501 lastsleep = jiffies + 10; 2502 } 2503 while (!torture_must_stop() && 2504 (torture_num_online_cpus() < mynumonline || !rcu_inkernel_boot_has_ended())) 2505 schedule_timeout_interruptible(HZ / 5); 2506 stutter_wait("rcu_torture_reader"); 2507 } while (!torture_must_stop()); 2508 if (irqreader && cur_ops->irq_capable) { 2509 timer_delete_sync(&t); 2510 timer_destroy_on_stack(&t); 2511 } 2512 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2513 torture_kthread_stopping("rcu_torture_reader"); 2514 return 0; 2515 } 2516 2517 struct rcu_torture_one_read_state_updown { 2518 struct hrtimer rtorsu_hrt; 2519 bool rtorsu_inuse; 2520 ktime_t rtorsu_kt; 2521 int rtorsu_cpu; 2522 unsigned long rtorsu_j; 2523 unsigned long rtorsu_ndowns; 2524 unsigned long rtorsu_nups; 2525 unsigned long rtorsu_nmigrates; 2526 struct torture_random_state rtorsu_trs; 2527 struct rcu_torture_one_read_state rtorsu_rtors; 2528 }; 2529 2530 static struct rcu_torture_one_read_state_updown *updownreaders; 2531 static DEFINE_TORTURE_RANDOM(rcu_torture_updown_rand); 2532 static int rcu_torture_updown(void *arg); 2533 2534 static enum hrtimer_restart rcu_torture_updown_hrt(struct hrtimer *hrtp) 2535 { 2536 int cpu = raw_smp_processor_id(); 2537 struct rcu_torture_one_read_state_updown *rtorsup; 2538 2539 rtorsup = container_of(hrtp, struct rcu_torture_one_read_state_updown, rtorsu_hrt); 2540 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2541 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2542 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2543 WRITE_ONCE(rtorsup->rtorsu_nmigrates, 2544 rtorsup->rtorsu_nmigrates + (cpu != rtorsup->rtorsu_cpu)); 2545 smp_store_release(&rtorsup->rtorsu_inuse, false); 2546 return HRTIMER_NORESTART; 2547 } 2548 2549 static int rcu_torture_updown_init(void) 2550 { 2551 int i; 2552 struct torture_random_state *rand = &rcu_torture_updown_rand; 2553 int ret; 2554 2555 if (n_up_down < 0) 2556 return 0; 2557 if (!srcu_torture_have_up_down()) { 2558 VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives"); 2559 return 0; 2560 } 2561 updownreaders = kcalloc(n_up_down, sizeof(*updownreaders), GFP_KERNEL); 2562 if (!updownreaders) { 2563 VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests"); 2564 return -ENOMEM; 2565 } 2566 for (i = 0; i < n_up_down; i++) { 2567 init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, rand); 2568 hrtimer_setup(&updownreaders[i].rtorsu_hrt, rcu_torture_updown_hrt, CLOCK_MONOTONIC, 2569 HRTIMER_MODE_REL | 
HRTIMER_MODE_HARD); 2570 torture_random_init(&updownreaders[i].rtorsu_trs); 2571 init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, 2572 &updownreaders[i].rtorsu_trs); 2573 } 2574 ret = torture_create_kthread(rcu_torture_updown, rand, updown_task); 2575 if (ret) { 2576 kfree(updownreaders); 2577 updownreaders = NULL; 2578 } 2579 return ret; 2580 } 2581 2582 static void rcu_torture_updown_cleanup(void) 2583 { 2584 struct rcu_torture_one_read_state_updown *rtorsup; 2585 2586 for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) { 2587 if (!smp_load_acquire(&rtorsup->rtorsu_inuse)) 2588 continue; 2589 if (hrtimer_cancel(&rtorsup->rtorsu_hrt) || WARN_ON_ONCE(rtorsup->rtorsu_inuse)) { 2590 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2591 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2592 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2593 smp_store_release(&rtorsup->rtorsu_inuse, false); 2594 } 2595 2596 } 2597 kfree(updownreaders); 2598 updownreaders = NULL; 2599 } 2600 2601 // Do one reader for rcu_torture_updown(). 2602 static void rcu_torture_updown_one(struct rcu_torture_one_read_state_updown *rtorsup) 2603 { 2604 int idx; 2605 int rawidx; 2606 ktime_t t; 2607 2608 init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2609 rawidx = cur_ops->down_read(); 2610 WRITE_ONCE(rtorsup->rtorsu_ndowns, rtorsup->rtorsu_ndowns + 1); 2611 idx = (rawidx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; 2612 rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN; 2613 rtorsup->rtorsu_rtors.rtrsp++; 2614 rtorsup->rtorsu_cpu = raw_smp_processor_id(); 2615 if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1)) { 2616 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2617 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2618 schedule_timeout_idle(HZ); 2619 return; 2620 } 2621 smp_store_release(&rtorsup->rtorsu_inuse, true); 2622 t = torture_random(&rtorsup->rtorsu_trs) & 0xfffff; // One per million. 2623 if (t < 10 * 1000) 2624 t = 200 * 1000 * 1000; 2625 hrtimer_start(&rtorsup->rtorsu_hrt, t, HRTIMER_MODE_REL | HRTIMER_MODE_HARD); 2626 smp_mb(); // Sample jiffies after posting hrtimer. 2627 rtorsup->rtorsu_j = jiffies; // Not used by hrtimer handler. 2628 rtorsup->rtorsu_kt = t; 2629 } 2630 2631 /* 2632 * RCU torture up/down reader kthread, starting RCU readers in kthread 2633 * context and ending them in hrtimer handlers. Otherwise similar to 2634 * rcu_torture_reader(). 2635 */ 2636 static int 2637 rcu_torture_updown(void *arg) 2638 { 2639 unsigned long j; 2640 struct rcu_torture_one_read_state_updown *rtorsup; 2641 2642 VERBOSE_TOROUT_STRING("rcu_torture_updown task started"); 2643 do { 2644 for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) { 2645 if (torture_must_stop()) 2646 break; 2647 j = smp_load_acquire(&jiffies); // Time before ->rtorsu_inuse. 
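// Because the jiffies sample above precedes the ->rtorsu_inuse check, any
// delay between the two can only make "j" stale (smaller), so the
// hrtimer-latency WARN_ONCE() below errs on the side of not firing.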
2648 if (smp_load_acquire(&rtorsup->rtorsu_inuse)) { 2649 WARN_ONCE(time_after(j, rtorsup->rtorsu_j + 1 + HZ * 10), 2650 "hrtimer queued at jiffies %lu for %lld ns took %lu jiffies\n", rtorsup->rtorsu_j, rtorsup->rtorsu_kt, j - rtorsup->rtorsu_j); 2651 continue; 2652 } 2653 rcu_torture_updown_one(rtorsup); 2654 } 2655 torture_hrtimeout_ms(1, 1000, &rcu_torture_updown_rand); 2656 stutter_wait("rcu_torture_updown"); 2657 } while (!torture_must_stop()); 2658 rcu_torture_updown_cleanup(); 2659 torture_kthread_stopping("rcu_torture_updown"); 2660 return 0; 2661 } 2662 2663 /* 2664 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2665 * increase race probabilities and fuzzes the interval between toggling. 2666 */ 2667 static int rcu_nocb_toggle(void *arg) 2668 { 2669 int cpu; 2670 int maxcpu = -1; 2671 int oldnice = task_nice(current); 2672 long r; 2673 DEFINE_TORTURE_RANDOM(rand); 2674 ktime_t toggle_delay; 2675 unsigned long toggle_fuzz; 2676 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2677 2678 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2679 while (!rcu_inkernel_boot_has_ended()) 2680 schedule_timeout_interruptible(HZ / 10); 2681 for_each_possible_cpu(cpu) 2682 maxcpu = cpu; 2683 WARN_ON(maxcpu < 0); 2684 if (toggle_interval > ULONG_MAX) 2685 toggle_fuzz = ULONG_MAX >> 3; 2686 else 2687 toggle_fuzz = toggle_interval >> 3; 2688 if (toggle_fuzz <= 0) 2689 toggle_fuzz = NSEC_PER_USEC; 2690 do { 2691 r = torture_random(&rand); 2692 cpu = (r >> 1) % (maxcpu + 1); 2693 if (r & 0x1) { 2694 rcu_nocb_cpu_offload(cpu); 2695 atomic_long_inc(&n_nocb_offload); 2696 } else { 2697 rcu_nocb_cpu_deoffload(cpu); 2698 atomic_long_inc(&n_nocb_deoffload); 2699 } 2700 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2701 set_current_state(TASK_INTERRUPTIBLE); 2702 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2703 if (stutter_wait("rcu_nocb_toggle")) 2704 sched_set_normal(current, oldnice); 2705 } while (!torture_must_stop()); 2706 torture_kthread_stopping("rcu_nocb_toggle"); 2707 return 0; 2708 } 2709 2710 /* 2711 * Print torture statistics. Caller must ensure that there is only 2712 * one call to this function at a given time!!! This is normally 2713 * accomplished by relying on the module system to only have one copy 2714 * of the module loaded, and then by giving the rcu_torture_stats 2715 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2716 * thread is not running). 
2717 */ 2718 static void 2719 rcu_torture_stats_print(void) 2720 { 2721 int cpu; 2722 int i; 2723 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2724 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2725 long n_gpwraps = 0; 2726 unsigned long ndowns = 0; 2727 unsigned long nunexpired = 0; 2728 unsigned long nmigrates = 0; 2729 unsigned long nups = 0; 2730 struct rcu_torture *rtcp; 2731 static unsigned long rtcv_snap = ULONG_MAX; 2732 static bool splatted; 2733 struct task_struct *wtp; 2734 2735 for_each_possible_cpu(cpu) { 2736 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2737 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2738 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2739 } 2740 if (cur_ops->get_gpwrap_count) 2741 n_gpwraps += cur_ops->get_gpwrap_count(cpu); 2742 } 2743 if (updownreaders) { 2744 for (i = 0; i < n_up_down; i++) { 2745 ndowns += READ_ONCE(updownreaders[i].rtorsu_ndowns); 2746 nups += READ_ONCE(updownreaders[i].rtorsu_nups); 2747 nunexpired += READ_ONCE(updownreaders[i].rtorsu_inuse); 2748 nmigrates += READ_ONCE(updownreaders[i].rtorsu_nmigrates); 2749 } 2750 } 2751 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2752 if (pipesummary[i] != 0) 2753 break; 2754 } // The value of variable "i" is used later, so don't clobber it! 2755 2756 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2757 rtcp = rcu_access_pointer(rcu_torture_current); 2758 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2759 rtcp, 2760 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", 2761 rcu_torture_current_version, 2762 list_empty(&rcu_torture_freelist), 2763 atomic_read(&n_rcu_torture_alloc), 2764 atomic_read(&n_rcu_torture_alloc_fail), 2765 atomic_read(&n_rcu_torture_free)); 2766 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2767 atomic_read(&n_rcu_torture_mberror), 2768 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2769 n_rcu_torture_barrier_error, 2770 n_rcu_torture_boost_ktrerror); 2771 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2772 n_rcu_torture_boost_failure, 2773 n_rcu_torture_boosts, 2774 atomic_long_read(&n_rcu_torture_timers)); 2775 if (updownreaders) 2776 pr_cont("ndowns: %lu nups: %lu nhrt: %lu nmigrates: %lu ", ndowns, nups, nunexpired, nmigrates); 2777 torture_onoff_stats(); 2778 pr_cont("barrier: %ld/%ld:%ld ", 2779 data_race(n_barrier_successes), 2780 data_race(n_barrier_attempts), 2781 data_race(n_rcu_torture_barrier_error)); 2782 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2783 pr_cont("nocb-toggles: %ld:%ld ", 2784 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2785 pr_cont("gpwraps: %ld\n", n_gpwraps); 2786 2787 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2788 if (atomic_read(&n_rcu_torture_mberror) || 2789 atomic_read(&n_rcu_torture_mbchk_fail) || 2790 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2791 n_rcu_torture_boost_failure || i > 1) { 2792 pr_cont("%s", "!!! "); 2793 atomic_inc(&n_rcu_torture_error); 2794 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2795 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2796 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2797 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2798 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 
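// At this point "i" still indexes the deepest non-empty Reader Pipe bucket
// found above; any count beyond bucket 1 means a reader saw a structure that
// updaters had already advanced more than one stage down the pipeline.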
2799 WARN_ON_ONCE(i > 1); // Too-short grace period 2800 } 2801 pr_cont("Reader Pipe: "); 2802 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2803 pr_cont(" %ld", pipesummary[i]); 2804 pr_cont("\n"); 2805 2806 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2807 pr_cont("Reader Batch: "); 2808 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2809 pr_cont(" %ld", batchsummary[i]); 2810 pr_cont("\n"); 2811 2812 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2813 pr_cont("Free-Block Circulation: "); 2814 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2815 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2816 } 2817 pr_cont("\n"); 2818 2819 if (cur_ops->stats) 2820 cur_ops->stats(); 2821 if (rtcv_snap == rcu_torture_current_version && 2822 rcu_access_pointer(rcu_torture_current) && 2823 !rcu_stall_is_suppressed() && 2824 rcu_inkernel_boot_has_ended()) { 2825 int __maybe_unused flags = 0; 2826 unsigned long __maybe_unused gp_seq = 0; 2827 2828 if (cur_ops->get_gp_data) 2829 cur_ops->get_gp_data(&flags, &gp_seq); 2830 wtp = READ_ONCE(writer_task); 2831 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2832 rcu_torture_writer_state_getname(), 2833 rcu_torture_writer_state, gp_seq, flags, 2834 wtp == NULL ? ~0U : wtp->__state, 2835 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2836 if (!splatted && wtp) { 2837 sched_show_task(wtp); 2838 splatted = true; 2839 } 2840 if (cur_ops->gp_kthread_dbg) 2841 cur_ops->gp_kthread_dbg(); 2842 rcu_ftrace_dump(DUMP_ALL); 2843 } 2844 rtcv_snap = rcu_torture_current_version; 2845 } 2846 2847 /* 2848 * Periodically prints torture statistics, if periodic statistics printing 2849 * was specified via the stat_interval module parameter. 2850 */ 2851 static int 2852 rcu_torture_stats(void *arg) 2853 { 2854 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2855 do { 2856 schedule_timeout_interruptible(stat_interval * HZ); 2857 rcu_torture_stats_print(); 2858 torture_shutdown_absorb("rcu_torture_stats"); 2859 } while (!torture_must_stop()); 2860 torture_kthread_stopping("rcu_torture_stats"); 2861 return 0; 2862 } 2863 2864 /* Test mem_dump_obj() and friends. 
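   This exercises slab-, kmalloc()-, and vmalloc()-backed pointers in turn,
   along with the NULL and ZERO_SIZE_PTR special cases.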
*/ 2865 static void rcu_torture_mem_dump_obj(void) 2866 { 2867 struct rcu_head *rhp; 2868 struct kmem_cache *kcp; 2869 static int z; 2870 2871 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2872 if (WARN_ON_ONCE(!kcp)) 2873 return; 2874 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2875 if (WARN_ON_ONCE(!rhp)) { 2876 kmem_cache_destroy(kcp); 2877 return; 2878 } 2879 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2880 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2881 mem_dump_obj(ZERO_SIZE_PTR); 2882 pr_alert("mem_dump_obj(NULL):"); 2883 mem_dump_obj(NULL); 2884 pr_alert("mem_dump_obj(%px):", &rhp); 2885 mem_dump_obj(&rhp); 2886 pr_alert("mem_dump_obj(%px):", rhp); 2887 mem_dump_obj(rhp); 2888 pr_alert("mem_dump_obj(%px):", &rhp->func); 2889 mem_dump_obj(&rhp->func); 2890 pr_alert("mem_dump_obj(%px):", &z); 2891 mem_dump_obj(&z); 2892 kmem_cache_free(kcp, rhp); 2893 kmem_cache_destroy(kcp); 2894 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2895 if (WARN_ON_ONCE(!rhp)) 2896 return; 2897 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2898 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2899 mem_dump_obj(rhp); 2900 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2901 mem_dump_obj(&rhp->func); 2902 kfree(rhp); 2903 rhp = vmalloc(4096); 2904 if (WARN_ON_ONCE(!rhp)) 2905 return; 2906 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2907 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2908 mem_dump_obj(rhp); 2909 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2910 mem_dump_obj(&rhp->func); 2911 vfree(rhp); 2912 } 2913 2914 static void 2915 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2916 { 2917 pr_alert("%s" TORTURE_FLAG 2918 "--- %s: nreaders=%d nfakewriters=%d " 2919 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2920 "shuffle_interval=%d stutter=%d irqreader=%d " 2921 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2922 "test_boost=%d/%d test_boost_interval=%d " 2923 "test_boost_duration=%d test_boost_holdoff=%d shutdown_secs=%d " 2924 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2925 "stall_cpu_block=%d stall_cpu_repeat=%d " 2926 "n_barrier_cbs=%d " 2927 "onoff_interval=%d onoff_holdoff=%d " 2928 "read_exit_delay=%d read_exit_burst=%d " 2929 "reader_flavor=%x " 2930 "nocbs_nthreads=%d nocbs_toggle=%d " 2931 "test_nmis=%d " 2932 "preempt_duration=%d preempt_interval=%d n_up_down=%d\n", 2933 torture_type, tag, nrealreaders, nrealfakewriters, 2934 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2935 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2936 test_boost, cur_ops->can_boost, 2937 test_boost_interval, test_boost_duration, test_boost_holdoff, shutdown_secs, 2938 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2939 stall_cpu_block, stall_cpu_repeat, 2940 n_barrier_cbs, 2941 onoff_interval, onoff_holdoff, 2942 read_exit_delay, read_exit_burst, 2943 reader_flavor, 2944 nocbs_nthreads, nocbs_toggle, 2945 test_nmis, 2946 preempt_duration, preempt_interval, n_up_down); 2947 } 2948 2949 static int rcutorture_booster_cleanup(unsigned int cpu) 2950 { 2951 struct task_struct *t; 2952 2953 if (boost_tasks[cpu] == NULL) 2954 return 0; 2955 mutex_lock(&boost_mutex); 2956 t = boost_tasks[cpu]; 2957 boost_tasks[cpu] = NULL; 2958 rcu_torture_enable_rt_throttle(); 2959 mutex_unlock(&boost_mutex); 2960 2961 /* 
This must be outside of the mutex, otherwise deadlock! */ 2962 torture_stop_kthread(rcu_torture_boost, t); 2963 return 0; 2964 } 2965 2966 static int rcutorture_booster_init(unsigned int cpu) 2967 { 2968 int retval; 2969 2970 if (boost_tasks[cpu] != NULL) 2971 return 0; /* Already created, nothing more to do. */ 2972 2973 // Testing RCU priority boosting requires rcutorture do 2974 // some serious abuse. Counter this by running ksoftirqd 2975 // at higher priority. 2976 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2977 struct sched_param sp; 2978 struct task_struct *t; 2979 2980 t = per_cpu(ksoftirqd, cpu); 2981 WARN_ON_ONCE(!t); 2982 sp.sched_priority = 2; 2983 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2984 #ifdef CONFIG_IRQ_FORCED_THREADING 2985 if (force_irqthreads()) { 2986 t = per_cpu(ktimerd, cpu); 2987 WARN_ON_ONCE(!t); 2988 sp.sched_priority = 2; 2989 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2990 } 2991 #endif 2992 } 2993 2994 /* Don't allow time recalculation while creating a new task. */ 2995 mutex_lock(&boost_mutex); 2996 rcu_torture_disable_rt_throttle(); 2997 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2998 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2999 cpu, "rcu_torture_boost_%u"); 3000 if (IS_ERR(boost_tasks[cpu])) { 3001 retval = PTR_ERR(boost_tasks[cpu]); 3002 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 3003 n_rcu_torture_boost_ktrerror++; 3004 boost_tasks[cpu] = NULL; 3005 mutex_unlock(&boost_mutex); 3006 return retval; 3007 } 3008 mutex_unlock(&boost_mutex); 3009 return 0; 3010 } 3011 3012 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 3013 { 3014 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 3015 return NOTIFY_OK; 3016 } 3017 3018 static struct notifier_block rcu_torture_stall_block = { 3019 .notifier_call = rcu_torture_stall_nf, 3020 }; 3021 3022 /* 3023 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 3024 * induces a CPU stall for the time specified by stall_cpu. If a new 3025 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 3026 */ 3027 static void rcu_torture_stall_one(int rep, int irqsoff) 3028 { 3029 int idx; 3030 unsigned long stop_at; 3031 3032 if (stall_cpu_holdoff > 0) { 3033 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 3034 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 3035 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 3036 } 3037 if (!kthread_should_stop() && stall_gp_kthread > 0) { 3038 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 3039 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 3040 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 3041 if (kthread_should_stop()) 3042 break; 3043 schedule_timeout_uninterruptible(HZ); 3044 } 3045 } 3046 if (!kthread_should_stop() && stall_cpu > 0) { 3047 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 3048 stop_at = ktime_get_seconds() + stall_cpu; 3049 /* RCU CPU stall is expected behavior in following code. 
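   The stall loop runs inside ->readlock(), with interrupts or preemption
   disabled unless stall_cpu_block is set (in which case it periodically
   blocks instead), so the resulting stall warnings are the intended test
   output rather than a bug.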
*/ 3050 idx = cur_ops->readlock(); 3051 if (irqsoff) 3052 local_irq_disable(); 3053 else if (!stall_cpu_block) 3054 preempt_disable(); 3055 pr_alert("%s start stall episode %d on CPU %d.\n", 3056 __func__, rep + 1, raw_smp_processor_id()); 3057 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && 3058 !kthread_should_stop()) 3059 if (stall_cpu_block) { 3060 #ifdef CONFIG_PREEMPTION 3061 preempt_schedule(); 3062 #else 3063 schedule_timeout_uninterruptible(HZ); 3064 #endif 3065 } else if (stall_no_softlockup) { 3066 touch_softlockup_watchdog(); 3067 } 3068 if (irqsoff) 3069 local_irq_enable(); 3070 else if (!stall_cpu_block) 3071 preempt_enable(); 3072 cur_ops->readunlock(idx); 3073 } 3074 } 3075 3076 /* 3077 * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many 3078 * additional times as specified by the stall_cpu_repeat module parameter. 3079 * Note that stall_cpu_irqsoff is ignored on the second and subsequent 3080 * stall. 3081 */ 3082 static int rcu_torture_stall(void *args) 3083 { 3084 int i; 3085 int repeat = stall_cpu_repeat; 3086 int ret; 3087 3088 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 3089 if (repeat < 0) { 3090 repeat = 0; 3091 WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)); 3092 } 3093 if (rcu_cpu_stall_notifiers) { 3094 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 3095 if (ret) 3096 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 3097 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 3098 } 3099 for (i = 0; i <= repeat; i++) { 3100 if (kthread_should_stop()) 3101 break; 3102 rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0); 3103 } 3104 pr_alert("%s end.\n", __func__); 3105 if (rcu_cpu_stall_notifiers && !ret) { 3106 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 3107 if (ret) 3108 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 3109 } 3110 torture_shutdown_absorb("rcu_torture_stall"); 3111 while (!kthread_should_stop()) 3112 schedule_timeout_interruptible(10 * HZ); 3113 return 0; 3114 } 3115 3116 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 3117 static int __init rcu_torture_stall_init(void) 3118 { 3119 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 3120 return 0; 3121 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 3122 } 3123 3124 /* State structure for forward-progress self-propagating RCU callback. */ 3125 struct fwd_cb_state { 3126 struct rcu_head rh; 3127 int stop; 3128 }; 3129 3130 /* 3131 * Forward-progress self-propagating RCU callback function. Because 3132 * callbacks run from softirq, this function is an implicit RCU read-side 3133 * critical section. 3134 */ 3135 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 3136 { 3137 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 3138 3139 if (READ_ONCE(fcsp->stop)) { 3140 WRITE_ONCE(fcsp->stop, 2); 3141 return; 3142 } 3143 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 3144 } 3145 3146 /* State for continuous-flood RCU callbacks. */ 3147 struct rcu_fwd_cb { 3148 struct rcu_head rh; 3149 struct rcu_fwd_cb *rfc_next; 3150 struct rcu_fwd *rfc_rfp; 3151 int rfc_gps; 3152 }; 3153 3154 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 3155 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 3156 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 3157 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 3158 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 3159 3160 struct rcu_launder_hist { 3161 long n_launders; 3162 unsigned long launder_gp_seq; 3163 }; 3164 3165 struct rcu_fwd { 3166 spinlock_t rcu_fwd_lock; 3167 struct rcu_fwd_cb *rcu_fwd_cb_head; 3168 struct rcu_fwd_cb **rcu_fwd_cb_tail; 3169 long n_launders_cb; 3170 unsigned long rcu_fwd_startat; 3171 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 3172 unsigned long rcu_launder_gp_seq_start; 3173 int rcu_fwd_id; 3174 }; 3175 3176 static DEFINE_MUTEX(rcu_fwd_mutex); 3177 static struct rcu_fwd *rcu_fwds; 3178 static unsigned long rcu_fwd_seq; 3179 static atomic_long_t rcu_fwd_max_cbs; 3180 static bool rcu_fwd_emergency_stop; 3181 3182 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 3183 { 3184 unsigned long gps; 3185 unsigned long gps_old; 3186 int i; 3187 int j; 3188 3189 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 3190 if (rfp->n_launders_hist[i].n_launders > 0) 3191 break; 3192 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 3193 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 3194 gps_old = rfp->rcu_launder_gp_seq_start; 3195 for (j = 0; j <= i; j++) { 3196 gps = rfp->n_launders_hist[j].launder_gp_seq; 3197 pr_cont(" %ds/%d: %ld:%ld", 3198 j + 1, FWD_CBS_HIST_DIV, 3199 rfp->n_launders_hist[j].n_launders, 3200 rcutorture_seq_diff(gps, gps_old)); 3201 gps_old = gps; 3202 } 3203 pr_cont("\n"); 3204 } 3205 3206 /* Callback function for continuous-flood RCU callbacks. */ 3207 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 3208 { 3209 unsigned long flags; 3210 int i; 3211 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 3212 struct rcu_fwd_cb **rfcpp; 3213 struct rcu_fwd *rfp = rfcp->rfc_rfp; 3214 3215 rfcp->rfc_next = NULL; 3216 rfcp->rfc_gps++; 3217 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 3218 rfcpp = rfp->rcu_fwd_cb_tail; 3219 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 3220 smp_store_release(rfcpp, rfcp); 3221 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 3222 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 3223 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 3224 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 3225 rfp->n_launders_hist[i].n_launders++; 3226 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 3227 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3228 } 3229 3230 // Give the scheduler a chance, even on nohz_full CPUs. 3231 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 3232 { 3233 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 3234 // Real call_rcu() floods hit userspace, so emulate that. 3235 if (need_resched() || (iter & 0xfff)) 3236 schedule(); 3237 return; 3238 } 3239 // No userspace emulation: CB invocation throttles call_rcu() 3240 cond_resched(); 3241 } 3242 3243 /* 3244 * Free all callbacks on the rcu_fwd_cb_head list, either because the 3245 * test is over or because we hit an OOM event. 
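 * The list is drained one callback at a time under ->rcu_fwd_lock, and on
 * nohz_full kernels the loop also takes momentary RCU extended quiescent
 * states so that a long free-up pass does not itself stall grace periods.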
3246 */ 3247 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 3248 { 3249 unsigned long flags; 3250 unsigned long freed = 0; 3251 struct rcu_fwd_cb *rfcp; 3252 3253 for (;;) { 3254 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 3255 rfcp = rfp->rcu_fwd_cb_head; 3256 if (!rfcp) { 3257 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3258 break; 3259 } 3260 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 3261 if (!rfp->rcu_fwd_cb_head) 3262 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 3263 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3264 kfree(rfcp); 3265 freed++; 3266 rcu_torture_fwd_prog_cond_resched(freed); 3267 if (tick_nohz_full_enabled()) { 3268 local_irq_save(flags); 3269 rcu_momentary_eqs(); 3270 local_irq_restore(flags); 3271 } 3272 } 3273 return freed; 3274 } 3275 3276 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 3277 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 3278 int *tested, int *tested_tries) 3279 { 3280 unsigned long cver; 3281 unsigned long dur; 3282 struct fwd_cb_state fcs; 3283 unsigned long gps; 3284 int idx; 3285 int sd; 3286 int sd4; 3287 bool selfpropcb = false; 3288 unsigned long stopat; 3289 static DEFINE_TORTURE_RANDOM(trs); 3290 3291 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3292 if (!cur_ops->sync) 3293 return; // Cannot do need_resched() forward progress testing without ->sync. 3294 if (cur_ops->call && cur_ops->cb_barrier) { 3295 init_rcu_head_on_stack(&fcs.rh); 3296 selfpropcb = true; 3297 } 3298 3299 /* Tight loop containing cond_resched(). */ 3300 atomic_inc(&rcu_fwd_cb_nodelay); 3301 cur_ops->sync(); /* Later readers see above write. */ 3302 if (selfpropcb) { 3303 WRITE_ONCE(fcs.stop, 0); 3304 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 3305 } 3306 cver = READ_ONCE(rcu_torture_current_version); 3307 gps = cur_ops->get_gp_seq(); 3308 sd = cur_ops->stall_dur() + 1; 3309 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 3310 dur = sd4 + torture_random(&trs) % (sd - sd4); 3311 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 3312 stopat = rfp->rcu_fwd_startat + dur; 3313 while (time_before(jiffies, stopat) && 3314 !shutdown_time_arrived() && 3315 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3316 idx = cur_ops->readlock(); 3317 udelay(10); 3318 cur_ops->readunlock(idx); 3319 if (!fwd_progress_need_resched || need_resched()) 3320 cond_resched(); 3321 } 3322 (*tested_tries)++; 3323 if (!time_before(jiffies, stopat) && 3324 !shutdown_time_arrived() && 3325 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3326 (*tested)++; 3327 cver = READ_ONCE(rcu_torture_current_version) - cver; 3328 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 3329 WARN_ON(!cver && gps < 2); 3330 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 3331 rfp->rcu_fwd_id, dur, cver, gps); 3332 } 3333 if (selfpropcb) { 3334 WRITE_ONCE(fcs.stop, 1); 3335 cur_ops->sync(); /* Wait for running CB to complete. */ 3336 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 3337 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 3338 } 3339 3340 if (selfpropcb) { 3341 WARN_ON(READ_ONCE(fcs.stop) != 2); 3342 destroy_rcu_head_on_stack(&fcs.rh); 3343 } 3344 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 3345 atomic_dec(&rcu_fwd_cb_nodelay); 3346 } 3347 3348 /* Carry out call_rcu() forward-progress testing. 
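 * The loop below re-posts callbacks taken from the head of the rcu_fwd_cb
 * list (rcu_torture_fwd_cb_cr() puts each invoked callback back on that
 * list), allocating new callbacks when none are ready, and stops after
 * MAX_FWD_CB_JIFFIES or once MIN_FWD_CBS_LAUNDERED callbacks have each
 * been invoked at least MIN_FWD_CB_LAUNDERS times.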
*/ 3349 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 3350 { 3351 unsigned long cver; 3352 unsigned long flags; 3353 unsigned long gps; 3354 int i; 3355 long n_launders; 3356 long n_launders_cb_snap; 3357 long n_launders_sa; 3358 long n_max_cbs; 3359 long n_max_gps; 3360 struct rcu_fwd_cb *rfcp; 3361 struct rcu_fwd_cb *rfcpn; 3362 unsigned long stopat; 3363 unsigned long stoppedat; 3364 3365 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3366 if (READ_ONCE(rcu_fwd_emergency_stop)) 3367 return; /* Get out of the way quickly, no GP wait! */ 3368 if (!cur_ops->call) 3369 return; /* Can't do call_rcu() fwd prog without ->call. */ 3370 3371 /* Loop continuously posting RCU callbacks. */ 3372 atomic_inc(&rcu_fwd_cb_nodelay); 3373 cur_ops->sync(); /* Later readers see above write. */ 3374 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 3375 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 3376 n_launders = 0; 3377 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 3378 n_launders_sa = 0; 3379 n_max_cbs = 0; 3380 n_max_gps = 0; 3381 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 3382 rfp->n_launders_hist[i].n_launders = 0; 3383 cver = READ_ONCE(rcu_torture_current_version); 3384 gps = cur_ops->get_gp_seq(); 3385 rfp->rcu_launder_gp_seq_start = gps; 3386 tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. 3387 while (time_before(jiffies, stopat) && 3388 !shutdown_time_arrived() && 3389 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3390 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 3391 rfcpn = NULL; 3392 if (rfcp) 3393 rfcpn = READ_ONCE(rfcp->rfc_next); 3394 if (rfcpn) { 3395 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 3396 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 3397 break; 3398 rfp->rcu_fwd_cb_head = rfcpn; 3399 n_launders++; 3400 n_launders_sa++; 3401 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 3402 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 3403 if (WARN_ON_ONCE(!rfcp)) { 3404 schedule_timeout_interruptible(1); 3405 continue; 3406 } 3407 n_max_cbs++; 3408 n_launders_sa = 0; 3409 rfcp->rfc_gps = 0; 3410 rfcp->rfc_rfp = rfp; 3411 } else { 3412 rfcp = NULL; 3413 } 3414 if (rfcp) 3415 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 3416 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 3417 if (tick_nohz_full_enabled()) { 3418 local_irq_save(flags); 3419 rcu_momentary_eqs(); 3420 local_irq_restore(flags); 3421 } 3422 } 3423 stoppedat = jiffies; 3424 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 3425 cver = READ_ONCE(rcu_torture_current_version) - cver; 3426 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 3427 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 3428 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. 
*/ 3429 (void)rcu_torture_fwd_prog_cbfree(rfp); 3430 3431 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 3432 !shutdown_time_arrived()) { 3433 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) 3434 cur_ops->gp_kthread_dbg(); 3435 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n", 3436 __func__, 3437 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 3438 n_launders + n_max_cbs - n_launders_cb_snap, 3439 n_launders, n_launders_sa, 3440 n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); 3441 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 3442 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 3443 rcu_torture_fwd_cb_hist(rfp); 3444 mutex_unlock(&rcu_fwd_mutex); 3445 } 3446 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 3447 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 3448 atomic_dec(&rcu_fwd_cb_nodelay); 3449 } 3450 3451 3452 /* 3453 * OOM notifier, but this only prints diagnostic information for the 3454 * current forward-progress test. 3455 */ 3456 static int rcutorture_oom_notify(struct notifier_block *self, 3457 unsigned long notused, void *nfreed) 3458 { 3459 int i; 3460 long ncbs; 3461 struct rcu_fwd *rfp; 3462 3463 mutex_lock(&rcu_fwd_mutex); 3464 rfp = rcu_fwds; 3465 if (!rfp) { 3466 mutex_unlock(&rcu_fwd_mutex); 3467 return NOTIFY_OK; 3468 } 3469 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 3470 __func__); 3471 for (i = 0; i < fwd_progress; i++) { 3472 rcu_torture_fwd_cb_hist(&rfp[i]); 3473 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 3474 } 3475 WRITE_ONCE(rcu_fwd_emergency_stop, true); 3476 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 3477 ncbs = 0; 3478 for (i = 0; i < fwd_progress; i++) 3479 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3480 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3481 cur_ops->cb_barrier(); 3482 ncbs = 0; 3483 for (i = 0; i < fwd_progress; i++) 3484 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3485 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3486 cur_ops->cb_barrier(); 3487 ncbs = 0; 3488 for (i = 0; i < fwd_progress; i++) 3489 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3490 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3491 smp_mb(); /* Frees before return to avoid redoing OOM. */ 3492 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 3493 pr_info("%s returning after OOM processing.\n", __func__); 3494 mutex_unlock(&rcu_fwd_mutex); 3495 return NOTIFY_OK; 3496 } 3497 3498 static struct notifier_block rcutorture_oom_nb = { 3499 .notifier_call = rcutorture_oom_notify 3500 }; 3501 3502 /* Carry out grace-period forward-progress testing. 
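 * Instance 0 paces each round: it sleeps for fwd_progress_holdoff seconds,
 * clears the emergency-stop flag, reports the previous round's maximum
 * callback count, and advances rcu_fwd_seq.  The other instances wait for
 * rcu_fwd_seq to change before running the callback-flood and
 * need_resched()/cond_resched() forward-progress tests.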
*/ 3503 static int rcu_torture_fwd_prog(void *args) 3504 { 3505 bool firsttime = true; 3506 long max_cbs; 3507 int oldnice = task_nice(current); 3508 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 3509 struct rcu_fwd *rfp = args; 3510 int tested = 0; 3511 int tested_tries = 0; 3512 3513 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 3514 while (!rcu_inkernel_boot_has_ended()) 3515 schedule_timeout_interruptible(HZ / 10); 3516 rcu_bind_current_to_nocb(); 3517 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 3518 set_user_nice(current, MAX_NICE); 3519 do { 3520 if (!rfp->rcu_fwd_id) { 3521 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 3522 WRITE_ONCE(rcu_fwd_emergency_stop, false); 3523 if (!firsttime) { 3524 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 3525 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 3526 } 3527 firsttime = false; 3528 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 3529 } else { 3530 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 3531 schedule_timeout_interruptible(HZ / 20); 3532 oldseq = READ_ONCE(rcu_fwd_seq); 3533 } 3534 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3535 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 3536 rcu_torture_fwd_prog_cr(rfp); 3537 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 3538 (!IS_ENABLED(CONFIG_TINY_RCU) || 3539 (rcu_inkernel_boot_has_ended() && 3540 torture_num_online_cpus() > rfp->rcu_fwd_id))) 3541 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 3542 3543 /* Avoid slow periods, better to test when busy. */ 3544 if (stutter_wait("rcu_torture_fwd_prog")) 3545 sched_set_normal(current, oldnice); 3546 } while (!torture_must_stop()); 3547 /* Short runs might not contain a valid forward-progress attempt. */ 3548 if (!rfp->rcu_fwd_id) { 3549 WARN_ON(!tested && tested_tries >= 5); 3550 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 3551 } 3552 torture_kthread_stopping("rcu_torture_fwd_prog"); 3553 return 0; 3554 } 3555 3556 /* If forward-progress checking is requested and feasible, spawn the thread. */ 3557 static int __init rcu_torture_fwd_prog_init(void) 3558 { 3559 int i; 3560 int ret = 0; 3561 struct rcu_fwd *rfp; 3562 3563 if (!fwd_progress) 3564 return 0; /* Not requested, so don't do it. */ 3565 if (fwd_progress >= nr_cpu_ids) { 3566 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 3567 fwd_progress = nr_cpu_ids; 3568 } else if (fwd_progress < 0) { 3569 fwd_progress = nr_cpu_ids; 3570 } 3571 if ((!cur_ops->sync && !cur_ops->call) || 3572 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 3573 cur_ops == &rcu_busted_ops) { 3574 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 3575 fwd_progress = 0; 3576 return 0; 3577 } 3578 if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3579 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing"); 3580 fwd_progress = 0; 3581 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 3582 return -EINVAL; /* In module, can fail back to user. */ 3583 WARN_ON(1); /* Make sure rcutorture scripting notices conflict. 
*/ 3584 return 0; 3585 } 3586 if (fwd_progress_holdoff <= 0) 3587 fwd_progress_holdoff = 1; 3588 if (fwd_progress_div <= 0) 3589 fwd_progress_div = 4; 3590 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 3591 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 3592 if (!rfp || !fwd_prog_tasks) { 3593 kfree(rfp); 3594 kfree(fwd_prog_tasks); 3595 fwd_prog_tasks = NULL; 3596 fwd_progress = 0; 3597 return -ENOMEM; 3598 } 3599 for (i = 0; i < fwd_progress; i++) { 3600 spin_lock_init(&rfp[i].rcu_fwd_lock); 3601 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3602 rfp[i].rcu_fwd_id = i; 3603 } 3604 mutex_lock(&rcu_fwd_mutex); 3605 rcu_fwds = rfp; 3606 mutex_unlock(&rcu_fwd_mutex); 3607 register_oom_notifier(&rcutorture_oom_nb); 3608 for (i = 0; i < fwd_progress; i++) { 3609 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3610 if (ret) { 3611 fwd_progress = i; 3612 return ret; 3613 } 3614 } 3615 return 0; 3616 } 3617 3618 static void rcu_torture_fwd_prog_cleanup(void) 3619 { 3620 int i; 3621 struct rcu_fwd *rfp; 3622 3623 if (!rcu_fwds || !fwd_prog_tasks) 3624 return; 3625 for (i = 0; i < fwd_progress; i++) 3626 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3627 unregister_oom_notifier(&rcutorture_oom_nb); 3628 mutex_lock(&rcu_fwd_mutex); 3629 rfp = rcu_fwds; 3630 rcu_fwds = NULL; 3631 mutex_unlock(&rcu_fwd_mutex); 3632 kfree(rfp); 3633 kfree(fwd_prog_tasks); 3634 fwd_prog_tasks = NULL; 3635 } 3636 3637 /* Callback function for RCU barrier testing. */ 3638 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3639 { 3640 atomic_inc(&barrier_cbs_invoked); 3641 } 3642 3643 /* IPI handler to get callback posted on desired CPU, if online. */ 3644 static int rcu_torture_barrier1cb(void *rcu_void) 3645 { 3646 struct rcu_head *rhp = rcu_void; 3647 3648 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3649 return 0; 3650 } 3651 3652 /* kthread function to register callbacks used to test RCU barriers. */ 3653 static int rcu_torture_barrier_cbs(void *arg) 3654 { 3655 long myid = (long)arg; 3656 bool lastphase = false; 3657 bool newphase; 3658 struct rcu_head rcu; 3659 3660 init_rcu_head_on_stack(&rcu); 3661 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3662 set_user_nice(current, MAX_NICE); 3663 do { 3664 wait_event(barrier_cbs_wq[myid], 3665 (newphase = 3666 smp_load_acquire(&barrier_phase)) != lastphase || 3667 torture_must_stop()); 3668 lastphase = newphase; 3669 if (torture_must_stop()) 3670 break; 3671 /* 3672 * The above smp_load_acquire() ensures barrier_phase load 3673 * is ordered before the following ->call(). 3674 */ 3675 if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1)) 3676 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3677 3678 if (atomic_dec_and_test(&barrier_cbs_count)) 3679 wake_up(&barrier_wq); 3680 } while (!torture_must_stop()); 3681 if (cur_ops->cb_barrier != NULL) 3682 cur_ops->cb_barrier(); 3683 destroy_rcu_head_on_stack(&rcu); 3684 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3685 return 0; 3686 } 3687 3688 /* kthread function to drive and coordinate RCU barrier testing. */ 3689 static int rcu_torture_barrier(void *arg) 3690 { 3691 int i; 3692 3693 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3694 do { 3695 atomic_set(&barrier_cbs_invoked, 0); 3696 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3697 /* Ensure barrier_phase ordered after prior assignments. 
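 * The matching smp_load_acquire() of barrier_phase is in
 * rcu_torture_barrier_cbs() above.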
*/ 3698 smp_store_release(&barrier_phase, !barrier_phase); 3699 for (i = 0; i < n_barrier_cbs; i++) 3700 wake_up(&barrier_cbs_wq[i]); 3701 wait_event(barrier_wq, 3702 atomic_read(&barrier_cbs_count) == 0 || 3703 torture_must_stop()); 3704 if (torture_must_stop()) 3705 break; 3706 n_barrier_attempts++; 3707 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3708 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3709 n_rcu_torture_barrier_error++; 3710 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3711 atomic_read(&barrier_cbs_invoked), 3712 n_barrier_cbs); 3713 WARN_ON(1); 3714 // Wait manually for the remaining callbacks 3715 i = 0; 3716 do { 3717 if (WARN_ON(i++ > HZ)) 3718 i = INT_MIN; 3719 schedule_timeout_interruptible(1); 3720 cur_ops->cb_barrier(); 3721 } while (atomic_read(&barrier_cbs_invoked) != 3722 n_barrier_cbs && 3723 !torture_must_stop()); 3724 smp_mb(); // Can't trust ordering if broken. 3725 if (!torture_must_stop()) 3726 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3727 atomic_read(&barrier_cbs_invoked)); 3728 } else { 3729 n_barrier_successes++; 3730 } 3731 schedule_timeout_interruptible(HZ / 10); 3732 } while (!torture_must_stop()); 3733 torture_kthread_stopping("rcu_torture_barrier"); 3734 return 0; 3735 } 3736 3737 /* Initialize RCU barrier testing. */ 3738 static int rcu_torture_barrier_init(void) 3739 { 3740 int i; 3741 int ret; 3742 3743 if (n_barrier_cbs <= 0) 3744 return 0; 3745 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3746 pr_alert("%s" TORTURE_FLAG 3747 " Call or barrier ops missing for %s,\n", 3748 torture_type, cur_ops->name); 3749 pr_alert("%s" TORTURE_FLAG 3750 " RCU barrier testing omitted from run.\n", 3751 torture_type); 3752 return 0; 3753 } 3754 atomic_set(&barrier_cbs_count, 0); 3755 atomic_set(&barrier_cbs_invoked, 0); 3756 barrier_cbs_tasks = 3757 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3758 GFP_KERNEL); 3759 barrier_cbs_wq = 3760 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3761 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3762 return -ENOMEM; 3763 for (i = 0; i < n_barrier_cbs; i++) { 3764 init_waitqueue_head(&barrier_cbs_wq[i]); 3765 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3766 (void *)(long)i, 3767 barrier_cbs_tasks[i]); 3768 if (ret) 3769 return ret; 3770 } 3771 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3772 } 3773 3774 /* Clean up after RCU barrier testing. */ 3775 static void rcu_torture_barrier_cleanup(void) 3776 { 3777 int i; 3778 3779 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3780 if (barrier_cbs_tasks != NULL) { 3781 for (i = 0; i < n_barrier_cbs; i++) 3782 torture_stop_kthread(rcu_torture_barrier_cbs, 3783 barrier_cbs_tasks[i]); 3784 kfree(barrier_cbs_tasks); 3785 barrier_cbs_tasks = NULL; 3786 } 3787 if (barrier_cbs_wq != NULL) { 3788 kfree(barrier_cbs_wq); 3789 barrier_cbs_wq = NULL; 3790 } 3791 } 3792 3793 static bool rcu_torture_can_boost(void) 3794 { 3795 static int boost_warn_once; 3796 int prio; 3797 3798 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3799 return false; 3800 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3801 return false; 3802 3803 prio = rcu_get_gp_kthreads_prio(); 3804 if (!prio) 3805 return false; 3806 3807 if (prio < 2) { 3808 if (boost_warn_once == 1) 3809 return false; 3810 3811 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3812 boost_warn_once = 1; 3813 return false; 3814 } 3815 3816 return true; 3817 } 3818 3819 static bool read_exit_child_stop; 3820 static bool read_exit_child_stopped; 3821 static wait_queue_head_t read_exit_wq; 3822 3823 // Child kthread which just does an rcutorture reader and exits. 3824 static int rcu_torture_read_exit_child(void *trsp_in) 3825 { 3826 struct torture_random_state *trsp = trsp_in; 3827 3828 set_user_nice(current, MAX_NICE); 3829 // Minimize time between reading and exiting. 3830 while (!kthread_should_stop()) 3831 schedule_timeout_uninterruptible(HZ / 20); 3832 (void)rcu_torture_one_read(trsp, -1); 3833 return 0; 3834 } 3835 3836 // Parent kthread which creates and destroys read-exit child kthreads. 3837 static int rcu_torture_read_exit(void *unused) 3838 { 3839 bool errexit = false; 3840 int i; 3841 struct task_struct *tsp; 3842 DEFINE_TORTURE_RANDOM(trs); 3843 3844 // Allocate and initialize. 3845 set_user_nice(current, MAX_NICE); 3846 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3847 3848 // Each pass through this loop does one read-exit episode. 3849 do { 3850 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3851 for (i = 0; i < read_exit_burst; i++) { 3852 if (READ_ONCE(read_exit_child_stop)) 3853 break; 3854 stutter_wait("rcu_torture_read_exit"); 3855 // Spawn child. 3856 tsp = kthread_run(rcu_torture_read_exit_child, 3857 &trs, "%s", "rcu_torture_read_exit_child"); 3858 if (IS_ERR(tsp)) { 3859 TOROUT_ERRSTRING("out of memory"); 3860 errexit = true; 3861 break; 3862 } 3863 cond_resched(); 3864 kthread_stop(tsp); 3865 n_read_exits++; 3866 } 3867 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3868 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3869 i = 0; 3870 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3871 schedule_timeout_uninterruptible(HZ); 3872 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3873 3874 // Clean up and exit. 3875 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3876 smp_mb(); // Store before wakeup. 3877 wake_up(&read_exit_wq); 3878 while (!torture_must_stop()) 3879 schedule_timeout_uninterruptible(HZ / 20); 3880 torture_kthread_stopping("rcu_torture_read_exit"); 3881 return 0; 3882 } 3883 3884 static int rcu_torture_read_exit_init(void) 3885 { 3886 if (read_exit_burst <= 0) 3887 return 0; 3888 init_waitqueue_head(&read_exit_wq); 3889 read_exit_child_stop = false; 3890 read_exit_child_stopped = false; 3891 return torture_create_kthread(rcu_torture_read_exit, NULL, 3892 read_exit_task); 3893 } 3894 3895 static void rcu_torture_read_exit_cleanup(void) 3896 { 3897 if (!read_exit_task) 3898 return; 3899 WRITE_ONCE(read_exit_child_stop, true); 3900 smp_mb(); // Above write before wait. 
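// Wait for the parent read-exit kthread to finish reaping its children before stopping it.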
3901 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 3902 torture_stop_kthread(rcu_torture_read_exit, read_exit_task); 3903 } 3904 3905 static void rcutorture_test_nmis(int n) 3906 { 3907 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3908 int cpu; 3909 int dumpcpu; 3910 int i; 3911 3912 for (i = 0; i < n; i++) { 3913 preempt_disable(); 3914 cpu = smp_processor_id(); 3915 dumpcpu = cpu + 1; 3916 if (dumpcpu >= nr_cpu_ids) 3917 dumpcpu = 0; 3918 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu); 3919 dump_cpu_task(dumpcpu); 3920 preempt_enable(); 3921 schedule_timeout_uninterruptible(15 * HZ); 3922 } 3923 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3924 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis); 3925 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3926 } 3927 3928 // Randomly preempt online CPUs. 3929 static int rcu_torture_preempt(void *unused) 3930 { 3931 int cpu = -1; 3932 DEFINE_TORTURE_RANDOM(rand); 3933 3934 schedule_timeout_idle(stall_cpu_holdoff); 3935 do { 3936 // Wait for preempt_interval ms with up to 100us fuzz. 3937 torture_hrtimeout_ms(preempt_interval, 100, &rand); 3938 // Select online CPU. 3939 cpu = cpumask_next(cpu, cpu_online_mask); 3940 if (cpu >= nr_cpu_ids) 3941 cpu = cpumask_next(-1, cpu_online_mask); 3942 WARN_ON_ONCE(cpu >= nr_cpu_ids); 3943 // Move to that CPU, if can't do so, retry later. 3944 if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false)) 3945 continue; 3946 // Preempt at high-ish priority, then reset to normal. 3947 sched_set_fifo(current); 3948 torture_sched_setaffinity(current->pid, cpu_present_mask, true); 3949 mdelay(preempt_duration); 3950 sched_set_normal(current, 0); 3951 stutter_wait("rcu_torture_preempt"); 3952 } while (!torture_must_stop()); 3953 torture_kthread_stopping("rcu_torture_preempt"); 3954 return 0; 3955 } 3956 3957 static enum cpuhp_state rcutor_hp; 3958 3959 static struct hrtimer gpwrap_lag_timer; 3960 static bool gpwrap_lag_active; 3961 3962 /* Timer handler for toggling RCU grace-period sequence overflow test lag value */ 3963 static enum hrtimer_restart rcu_gpwrap_lag_timer(struct hrtimer *timer) 3964 { 3965 ktime_t next_delay; 3966 3967 if (gpwrap_lag_active) { 3968 pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n"); 3969 cur_ops->set_gpwrap_lag(0); 3970 gpwrap_lag_active = false; 3971 next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0); 3972 } else { 3973 pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n", gpwrap_lag_gps); 3974 cur_ops->set_gpwrap_lag(gpwrap_lag_gps); 3975 gpwrap_lag_active = true; 3976 next_delay = ktime_set(gpwrap_lag_active_mins * 60, 0); 3977 } 3978 3979 if (torture_must_stop_irq()) 3980 return HRTIMER_NORESTART; 3981 3982 hrtimer_forward_now(timer, next_delay); 3983 return HRTIMER_RESTART; 3984 } 3985 3986 static int rcu_gpwrap_lag_init(void) 3987 { 3988 if (!gpwrap_lag) 3989 return 0; 3990 3991 if (gpwrap_lag_cycle_mins <= 0 || gpwrap_lag_active_mins <= 0) { 3992 pr_alert("rcu-torture: lag timing parameters must be positive\n"); 3993 return -EINVAL; 3994 } 3995 3996 hrtimer_setup(&gpwrap_lag_timer, rcu_gpwrap_lag_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3997 gpwrap_lag_active = false; 3998 hrtimer_start(&gpwrap_lag_timer, 3999 ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL); 4000 4001 return 0; 4002 } 4003 4004 static void rcu_gpwrap_lag_cleanup(void) 4005 { 4006
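// Stop the toggle timer and set the gpwrap lag back to zero (disabled).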
hrtimer_cancel(&gpwrap_lag_timer); 4007 cur_ops->set_gpwrap_lag(0); 4008 gpwrap_lag_active = false; 4009 } 4010 static void 4011 rcu_torture_cleanup(void) 4012 { 4013 int firsttime; 4014 int flags = 0; 4015 unsigned long gp_seq = 0; 4016 int i; 4017 int j; 4018 4019 if (torture_cleanup_begin()) { 4020 if (cur_ops->cb_barrier != NULL) { 4021 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 4022 cur_ops->cb_barrier(); 4023 } 4024 if (cur_ops->gp_slow_unregister) 4025 cur_ops->gp_slow_unregister(NULL); 4026 return; 4027 } 4028 if (!cur_ops) { 4029 torture_cleanup_end(); 4030 return; 4031 } 4032 4033 rcutorture_test_nmis(test_nmis); 4034 4035 if (cur_ops->gp_kthread_dbg) 4036 cur_ops->gp_kthread_dbg(); 4037 torture_stop_kthread(rcu_torture_preempt, preempt_task); 4038 rcu_torture_read_exit_cleanup(); 4039 rcu_torture_barrier_cleanup(); 4040 rcu_torture_fwd_prog_cleanup(); 4041 torture_stop_kthread(rcu_torture_stall, stall_task); 4042 torture_stop_kthread(rcu_torture_writer, writer_task); 4043 4044 if (nocb_tasks) { 4045 for (i = 0; i < nrealnocbers; i++) 4046 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 4047 kfree(nocb_tasks); 4048 nocb_tasks = NULL; 4049 } 4050 4051 if (updown_task) { 4052 torture_stop_kthread(rcu_torture_updown, updown_task); 4053 updown_task = NULL; 4054 } 4055 if (reader_tasks) { 4056 for (i = 0; i < nrealreaders; i++) 4057 torture_stop_kthread(rcu_torture_reader, 4058 reader_tasks[i]); 4059 kfree(reader_tasks); 4060 reader_tasks = NULL; 4061 } 4062 kfree(rcu_torture_reader_mbchk); 4063 rcu_torture_reader_mbchk = NULL; 4064 4065 if (fakewriter_tasks) { 4066 for (i = 0; i < nrealfakewriters; i++) 4067 torture_stop_kthread(rcu_torture_fakewriter, 4068 fakewriter_tasks[i]); 4069 kfree(fakewriter_tasks); 4070 fakewriter_tasks = NULL; 4071 } 4072 4073 if (cur_ops->get_gp_data) 4074 cur_ops->get_gp_data(&flags, &gp_seq); 4075 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 4076 cur_ops->name, (long)gp_seq, flags, 4077 rcutorture_seq_diff(gp_seq, start_gp_seq)); 4078 torture_stop_kthread(rcu_torture_stats, stats_task); 4079 torture_stop_kthread(rcu_torture_fqs, fqs_task); 4080 if (rcu_torture_can_boost() && rcutor_hp >= 0) 4081 cpuhp_remove_state(rcutor_hp); 4082 4083 /* 4084 * Wait for all RCU callbacks to fire, then do torture-type-specific 4085 * cleanup operations. 4086 */ 4087 if (cur_ops->cb_barrier != NULL) { 4088 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 4089 cur_ops->cb_barrier(); 4090 } 4091 if (cur_ops->cleanup != NULL) 4092 cur_ops->cleanup(); 4093 4094 rcu_torture_mem_dump_obj(); 4095 4096 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 4097 4098 if (err_segs_recorded) { 4099 pr_alert("Failure/close-call rcutorture reader segments:\n"); 4100 if (rt_read_nsegs == 0) 4101 pr_alert("\t: No segments recorded!!!\n"); 4102 firsttime = 1; 4103 for (i = 0; i < rt_read_nsegs; i++) { 4104 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP)) 4105 pr_alert("\t%lluus ", div64_u64(err_segs[i].rt_ts, 1000ULL)); 4106 else 4107 pr_alert("\t"); 4108 pr_cont("%d: %#4x", i, err_segs[i].rt_readstate); 4109 if (err_segs[i].rt_delay_jiffies != 0) { 4110 pr_cont("%s%ldjiffies", firsttime ? 
"" : "+", 4111 err_segs[i].rt_delay_jiffies); 4112 firsttime = 0; 4113 } 4114 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 4115 pr_cont(" CPU %2d", err_segs[i].rt_cpu); 4116 if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu) 4117 pr_cont("->%-2d", err_segs[i].rt_end_cpu); 4118 else 4119 pr_cont(" ..."); 4120 } 4121 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && 4122 cur_ops->gather_gp_seqs && cur_ops->format_gp_seqs) { 4123 char buf1[20+1]; 4124 char buf2[20+1]; 4125 char sepchar = '-'; 4126 4127 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq, 4128 buf1, ARRAY_SIZE(buf1)); 4129 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq_end, 4130 buf2, ARRAY_SIZE(buf2)); 4131 if (err_segs[i].rt_gp_seq == err_segs[i].rt_gp_seq_end) { 4132 if (buf2[0]) { 4133 for (j = 0; buf2[j]; j++) 4134 buf2[j] = '.'; 4135 if (j) 4136 buf2[j - 1] = ' '; 4137 } 4138 sepchar = ' '; 4139 } 4140 pr_cont(" %s%c%s", buf1, sepchar, buf2); 4141 } 4142 if (err_segs[i].rt_delay_ms != 0) { 4143 pr_cont(" %s%ldms", firsttime ? "" : "+", 4144 err_segs[i].rt_delay_ms); 4145 firsttime = 0; 4146 } 4147 if (err_segs[i].rt_delay_us != 0) { 4148 pr_cont(" %s%ldus", firsttime ? "" : "+", 4149 err_segs[i].rt_delay_us); 4150 firsttime = 0; 4151 } 4152 pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : ""); 4153 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH) 4154 pr_cont(" BH"); 4155 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ) 4156 pr_cont(" IRQ"); 4157 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT) 4158 pr_cont(" PREEMPT"); 4159 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH) 4160 pr_cont(" RBH"); 4161 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED) 4162 pr_cont(" SCHED"); 4163 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1) 4164 pr_cont(" RCU_1"); 4165 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2) 4166 pr_cont(" RCU_2"); 4167 pr_cont("\n"); 4168 4169 } 4170 if (rt_read_preempted) 4171 pr_alert("\tReader was preempted.\n"); 4172 } 4173 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 4174 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 4175 else if (torture_onoff_failures()) 4176 rcu_torture_print_module_parms(cur_ops, 4177 "End of test: RCU_HOTPLUG"); 4178 else 4179 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 4180 torture_cleanup_end(); 4181 if (cur_ops->gp_slow_unregister) 4182 cur_ops->gp_slow_unregister(NULL); 4183 4184 if (gpwrap_lag && cur_ops->set_gpwrap_lag) 4185 rcu_gpwrap_lag_cleanup(); 4186 } 4187 4188 static void rcu_torture_leak_cb(struct rcu_head *rhp) 4189 { 4190 } 4191 4192 static void rcu_torture_err_cb(struct rcu_head *rhp) 4193 { 4194 /* 4195 * This -might- happen due to race conditions, but is unlikely. 4196 * The scenario that leads to this happening is that the 4197 * first of the pair of duplicate callbacks is queued, 4198 * someone else starts a grace period that includes that 4199 * callback, then the second of the pair must wait for the 4200 * next grace period. Unlikely, but can happen. If it 4201 * does happen, the debug-objects subsystem won't have splatted. 4202 */ 4203 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 4204 } 4205 4206 /* 4207 * Verify that double-free causes debug-objects to complain, but only 4208 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 4209 * cannot be carried out. 
4210 */ 4211 static void rcu_test_debug_objects(void) 4212 { 4213 struct rcu_head rh1; 4214 struct rcu_head rh2; 4215 int idx; 4216 4217 if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) { 4218 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n", 4219 KBUILD_MODNAME, cur_ops->name); 4220 return; 4221 } 4222 4223 if (WARN_ON_ONCE(cur_ops->debug_objects && 4224 (!cur_ops->call || !cur_ops->cb_barrier))) 4225 return; 4226 4227 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 4228 4229 init_rcu_head_on_stack(&rh1); 4230 init_rcu_head_on_stack(&rh2); 4231 pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name); 4232 4233 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 4234 idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */ 4235 cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 4236 cur_ops->call(&rh2, rcu_torture_leak_cb); 4237 cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 4238 if (rhp) { 4239 cur_ops->call(rhp, rcu_torture_leak_cb); 4240 cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 4241 } 4242 cur_ops->readunlock(idx); 4243 4244 /* Wait for them all to get done so we can safely return. */ 4245 cur_ops->cb_barrier(); 4246 pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name); 4247 destroy_rcu_head_on_stack(&rh1); 4248 destroy_rcu_head_on_stack(&rh2); 4249 kfree(rhp); 4250 } 4251 4252 static void rcutorture_sync(void) 4253 { 4254 static unsigned long n; 4255 4256 if (cur_ops->sync && !(++n & 0xfff)) 4257 cur_ops->sync(); 4258 } 4259 4260 static DEFINE_MUTEX(mut0); 4261 static DEFINE_MUTEX(mut1); 4262 static DEFINE_MUTEX(mut2); 4263 static DEFINE_MUTEX(mut3); 4264 static DEFINE_MUTEX(mut4); 4265 static DEFINE_MUTEX(mut5); 4266 static DEFINE_MUTEX(mut6); 4267 static DEFINE_MUTEX(mut7); 4268 static DEFINE_MUTEX(mut8); 4269 static DEFINE_MUTEX(mut9); 4270 4271 static DECLARE_RWSEM(rwsem0); 4272 static DECLARE_RWSEM(rwsem1); 4273 static DECLARE_RWSEM(rwsem2); 4274 static DECLARE_RWSEM(rwsem3); 4275 static DECLARE_RWSEM(rwsem4); 4276 static DECLARE_RWSEM(rwsem5); 4277 static DECLARE_RWSEM(rwsem6); 4278 static DECLARE_RWSEM(rwsem7); 4279 static DECLARE_RWSEM(rwsem8); 4280 static DECLARE_RWSEM(rwsem9); 4281 4282 DEFINE_STATIC_SRCU(srcu0); 4283 DEFINE_STATIC_SRCU(srcu1); 4284 DEFINE_STATIC_SRCU(srcu2); 4285 DEFINE_STATIC_SRCU(srcu3); 4286 DEFINE_STATIC_SRCU(srcu4); 4287 DEFINE_STATIC_SRCU(srcu5); 4288 DEFINE_STATIC_SRCU(srcu6); 4289 DEFINE_STATIC_SRCU(srcu7); 4290 DEFINE_STATIC_SRCU(srcu8); 4291 DEFINE_STATIC_SRCU(srcu9); 4292 4293 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, 4294 int cyclelen, int deadlock) 4295 { 4296 int j = i + 1; 4297 4298 if (j >= cyclelen) 4299 j = deadlock ? 0 : -1; 4300 if (j >= 0) 4301 pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i); 4302 else 4303 pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i); 4304 return j; 4305 } 4306 4307 // Test lockdep on SRCU-based deadlock scenarios. 
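// The test_srcu_lockdep module parameter is decimal-encoded as DNNL:
// D nonzero selects a deadlock cycle, NN selects the test type (0=SRCU,
// 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU), and L is the
// cycle length.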
4308 static void rcu_torture_init_srcu_lockdep(void) 4309 { 4310 int cyclelen; 4311 int deadlock; 4312 bool err = false; 4313 int i; 4314 int j; 4315 int idx; 4316 struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, 4317 &mut5, &mut6, &mut7, &mut8, &mut9 }; 4318 struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, 4319 &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; 4320 struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, 4321 &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; 4322 int testtype; 4323 4324 if (!test_srcu_lockdep) 4325 return; 4326 4327 deadlock = test_srcu_lockdep / 1000; 4328 testtype = (test_srcu_lockdep / 10) % 100; 4329 cyclelen = test_srcu_lockdep % 10; 4330 WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); 4331 if (WARN_ONCE(deadlock != !!deadlock, 4332 "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n", 4333 __func__, test_srcu_lockdep, deadlock)) 4334 err = true; 4335 if (WARN_ONCE(cyclelen <= 0, 4336 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", 4337 __func__, test_srcu_lockdep, cyclelen)) 4338 err = true; 4339 if (err) 4340 goto err_out; 4341 4342 if (testtype == 0) { 4343 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", 4344 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4345 if (deadlock && cyclelen == 1) 4346 pr_info("%s: Expect hang.\n", __func__); 4347 for (i = 0; i < cyclelen; i++) { 4348 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 4349 "srcu_read_unlock", i, cyclelen, deadlock); 4350 idx = srcu_read_lock(srcus[i]); 4351 if (j >= 0) 4352 synchronize_srcu(srcus[j]); 4353 srcu_read_unlock(srcus[i], idx); 4354 } 4355 return; 4356 } 4357 4358 if (testtype == 1) { 4359 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 4360 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4361 for (i = 0; i < cyclelen; i++) { 4362 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 4363 __func__, i, i, i, i); 4364 idx = srcu_read_lock(srcus[i]); 4365 mutex_lock(muts[i]); 4366 mutex_unlock(muts[i]); 4367 srcu_read_unlock(srcus[i], idx); 4368 4369 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 4370 "mutex_unlock", i, cyclelen, deadlock); 4371 mutex_lock(muts[i]); 4372 if (j >= 0) 4373 synchronize_srcu(srcus[j]); 4374 mutex_unlock(muts[i]); 4375 } 4376 return; 4377 } 4378 4379 if (testtype == 2) { 4380 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 4381 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4382 for (i = 0; i < cyclelen; i++) { 4383 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 4384 __func__, i, i, i, i); 4385 idx = srcu_read_lock(srcus[i]); 4386 down_read(rwsems[i]); 4387 up_read(rwsems[i]); 4388 srcu_read_unlock(srcus[i], idx); 4389 4390 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 4391 "up_write", i, cyclelen, deadlock); 4392 down_write(rwsems[i]); 4393 if (j >= 0) 4394 synchronize_srcu(srcus[j]); 4395 up_write(rwsems[i]); 4396 } 4397 return; 4398 } 4399 4400 #ifdef CONFIG_TASKS_TRACE_RCU 4401 if (testtype == 3) { 4402 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 4403 __func__, test_srcu_lockdep, cyclelen, deadlock ? 
"" : "non-"); 4404 if (deadlock && cyclelen == 1) 4405 pr_info("%s: Expect hang.\n", __func__); 4406 for (i = 0; i < cyclelen; i++) { 4407 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 4408 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 4409 : "synchronize_srcu"; 4410 char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock"; 4411 4412 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 4413 if (i == 0) 4414 rcu_read_lock_trace(); 4415 else 4416 idx = srcu_read_lock(srcus[i]); 4417 if (j >= 0) { 4418 if (i == cyclelen - 1) 4419 synchronize_rcu_tasks_trace(); 4420 else 4421 synchronize_srcu(srcus[j]); 4422 } 4423 if (i == 0) 4424 rcu_read_unlock_trace(); 4425 else 4426 srcu_read_unlock(srcus[i], idx); 4427 } 4428 return; 4429 } 4430 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 4431 4432 err_out: 4433 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 4434 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 4435 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 4436 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 4437 pr_info("%s: L: Cycle length.\n", __func__); 4438 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 4439 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 4440 } 4441 4442 static int __init 4443 rcu_torture_init(void) 4444 { 4445 long i; 4446 int cpu; 4447 int firsterr = 0; 4448 int flags = 0; 4449 unsigned long gp_seq = 0; 4450 static struct rcu_torture_ops *torture_ops[] = { 4451 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 4452 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 4453 &trivial_ops, 4454 }; 4455 4456 if (!torture_init_begin(torture_type, verbose)) 4457 return -EBUSY; 4458 4459 /* Process args and tell the world that the torturer is on the job. 
*/ 4460 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 4461 cur_ops = torture_ops[i]; 4462 if (strcmp(torture_type, cur_ops->name) == 0) 4463 break; 4464 } 4465 if (i == ARRAY_SIZE(torture_ops)) { 4466 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 4467 torture_type); 4468 pr_alert("rcu-torture types:"); 4469 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 4470 pr_cont(" %s", torture_ops[i]->name); 4471 pr_cont("\n"); 4472 firsterr = -EINVAL; 4473 cur_ops = NULL; 4474 goto unwind; 4475 } 4476 if (cur_ops->fqs == NULL && fqs_duration != 0) { 4477 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 4478 fqs_duration = 0; 4479 } 4480 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 4481 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 4482 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 4483 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 4484 nocbs_nthreads = 0; 4485 } 4486 if (cur_ops->init) 4487 cur_ops->init(); 4488 4489 rcu_torture_init_srcu_lockdep(); 4490 4491 if (nfakewriters >= 0) { 4492 nrealfakewriters = nfakewriters; 4493 } else { 4494 nrealfakewriters = num_online_cpus() - 2 - nfakewriters; 4495 if (nrealfakewriters <= 0) 4496 nrealfakewriters = 1; 4497 } 4498 4499 if (nreaders >= 0) { 4500 nrealreaders = nreaders; 4501 } else { 4502 nrealreaders = num_online_cpus() - 2 - nreaders; 4503 if (nrealreaders <= 0) 4504 nrealreaders = 1; 4505 } 4506 rcu_torture_print_module_parms(cur_ops, "Start of test"); 4507 if (cur_ops->get_gp_data) 4508 cur_ops->get_gp_data(&flags, &gp_seq); 4509 start_gp_seq = gp_seq; 4510 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 4511 cur_ops->name, (long)gp_seq, flags); 4512 4513 /* Set up the freelist. */ 4514 4515 INIT_LIST_HEAD(&rcu_torture_freelist); 4516 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 4517 rcu_tortures[i].rtort_mbtest = 0; 4518 list_add_tail(&rcu_tortures[i].rtort_free, 4519 &rcu_torture_freelist); 4520 } 4521 4522 /* Initialize the statistics so that each run gets its own numbers. */ 4523 4524 rcu_torture_current = NULL; 4525 rcu_torture_current_version = 0; 4526 atomic_set(&n_rcu_torture_alloc, 0); 4527 atomic_set(&n_rcu_torture_alloc_fail, 0); 4528 atomic_set(&n_rcu_torture_free, 0); 4529 atomic_set(&n_rcu_torture_mberror, 0); 4530 atomic_set(&n_rcu_torture_mbchk_fail, 0); 4531 atomic_set(&n_rcu_torture_mbchk_tries, 0); 4532 atomic_set(&n_rcu_torture_error, 0); 4533 n_rcu_torture_barrier_error = 0; 4534 n_rcu_torture_boost_ktrerror = 0; 4535 n_rcu_torture_boost_failure = 0; 4536 n_rcu_torture_boosts = 0; 4537 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 4538 atomic_set(&rcu_torture_wcount[i], 0); 4539 for_each_possible_cpu(cpu) { 4540 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 4541 per_cpu(rcu_torture_count, cpu)[i] = 0; 4542 per_cpu(rcu_torture_batch, cpu)[i] = 0; 4543 } 4544 } 4545 err_segs_recorded = 0; 4546 rt_read_nsegs = 0; 4547 4548 /* Start up the kthreads. 
*/ 4549 4550 rcu_torture_write_types(); 4551 if (nrealfakewriters > 0) { 4552 fakewriter_tasks = kcalloc(nrealfakewriters, 4553 sizeof(fakewriter_tasks[0]), 4554 GFP_KERNEL); 4555 if (fakewriter_tasks == NULL) { 4556 TOROUT_ERRSTRING("out of memory"); 4557 firsterr = -ENOMEM; 4558 goto unwind; 4559 } 4560 } 4561 for (i = 0; i < nrealfakewriters; i++) { 4562 firsterr = torture_create_kthread(rcu_torture_fakewriter, 4563 NULL, fakewriter_tasks[i]); 4564 if (torture_init_error(firsterr)) 4565 goto unwind; 4566 } 4567 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 4568 GFP_KERNEL); 4569 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 4570 GFP_KERNEL); 4571 if (!reader_tasks || !rcu_torture_reader_mbchk) { 4572 TOROUT_ERRSTRING("out of memory"); 4573 firsterr = -ENOMEM; 4574 goto unwind; 4575 } 4576 for (i = 0; i < nrealreaders; i++) { 4577 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 4578 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 4579 reader_tasks[i]); 4580 if (torture_init_error(firsterr)) 4581 goto unwind; 4582 } 4583 4584 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 4585 writer_task); 4586 if (torture_init_error(firsterr)) 4587 goto unwind; 4588 4589 firsterr = rcu_torture_updown_init(); 4590 if (torture_init_error(firsterr)) 4591 goto unwind; 4592 nrealnocbers = nocbs_nthreads; 4593 if (WARN_ON(nrealnocbers < 0)) 4594 nrealnocbers = 1; 4595 if (WARN_ON(nocbs_toggle < 0)) 4596 nocbs_toggle = HZ; 4597 if (nrealnocbers > 0) { 4598 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 4599 if (nocb_tasks == NULL) { 4600 TOROUT_ERRSTRING("out of memory"); 4601 firsterr = -ENOMEM; 4602 goto unwind; 4603 } 4604 } else { 4605 nocb_tasks = NULL; 4606 } 4607 for (i = 0; i < nrealnocbers; i++) { 4608 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 4609 if (torture_init_error(firsterr)) 4610 goto unwind; 4611 } 4612 if (stat_interval > 0) { 4613 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 4614 stats_task); 4615 if (torture_init_error(firsterr)) 4616 goto unwind; 4617 } 4618 if (test_no_idle_hz && shuffle_interval > 0) { 4619 firsterr = torture_shuffle_init(shuffle_interval * HZ); 4620 if (torture_init_error(firsterr)) 4621 goto unwind; 4622 } 4623 if (stutter < 0) 4624 stutter = 0; 4625 if (stutter) { 4626 int t; 4627 4628 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 4629 firsterr = torture_stutter_init(stutter * HZ, t); 4630 if (torture_init_error(firsterr)) 4631 goto unwind; 4632 } 4633 if (fqs_duration < 0) 4634 fqs_duration = 0; 4635 if (fqs_holdoff < 0) 4636 fqs_holdoff = 0; 4637 if (fqs_duration && fqs_holdoff) { 4638 /* Create the fqs thread */ 4639 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 4640 fqs_task); 4641 if (torture_init_error(firsterr)) 4642 goto unwind; 4643 } 4644 if (test_boost_interval < 1) 4645 test_boost_interval = 1; 4646 if (test_boost_duration < 2) 4647 test_boost_duration = 2; 4648 if (rcu_torture_can_boost()) { 4649 4650 boost_starttime = jiffies + test_boost_interval * HZ; 4651 4652 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 4653 rcutorture_booster_init, 4654 rcutorture_booster_cleanup); 4655 rcutor_hp = firsterr; 4656 if (torture_init_error(firsterr)) 4657 goto unwind; 4658 } 4659 shutdown_jiffies = jiffies + shutdown_secs * HZ; 4660 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 4661 if (torture_init_error(firsterr)) 4662 goto unwind; 4663 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 4664 rcutorture_sync); 4665 if (torture_init_error(firsterr)) 4666 goto unwind; 4667 firsterr = rcu_torture_stall_init(); 4668 if (torture_init_error(firsterr)) 4669 goto unwind; 4670 firsterr = rcu_torture_fwd_prog_init(); 4671 if (torture_init_error(firsterr)) 4672 goto unwind; 4673 firsterr = rcu_torture_barrier_init(); 4674 if (torture_init_error(firsterr)) 4675 goto unwind; 4676 firsterr = rcu_torture_read_exit_init(); 4677 if (torture_init_error(firsterr)) 4678 goto unwind; 4679 if (preempt_duration > 0) { 4680 firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task); 4681 if (torture_init_error(firsterr)) 4682 goto unwind; 4683 } 4684 if (object_debug) 4685 rcu_test_debug_objects(); 4686 4687 if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) 4688 cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); 4689 4690 if (gpwrap_lag && cur_ops->set_gpwrap_lag) { 4691 firsterr = rcu_gpwrap_lag_init(); 4692 if (torture_init_error(firsterr)) 4693 goto unwind; 4694 } 4695 4696 torture_init_end(); 4697 return 0; 4698 4699 unwind: 4700 torture_init_end(); 4701 rcu_torture_cleanup(); 4702 if (shutdown_secs) { 4703 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 4704 kernel_power_off(); 4705 } 4706 return firsterr; 4707 } 4708 4709 module_init(rcu_torture_init); 4710 module_exit(rcu_torture_cleanup); 4711
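/*
 * Usage sketch, not part of the test itself: rcutorture is normally driven
 * by the rcutorture scripting under tools/testing/selftests/rcutorture/,
 * but when built as a module it can also be loaded by hand.  The parameter
 * values below are illustrative only:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=15
 *	... let the test run for a while ...
 *	rmmod rcutorture	# end-of-test summary is printed to the console
 */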