// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

// Bits for ->extendables field, extendables param, and related definitions.
#define RCUTORTURE_RDR_SHIFT_1 8	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_1 (0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2 16	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_2 (0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	// Extend readers by disabling bh.
#define RCUTORTURE_RDR_IRQ	0x02	//  ... disabling interrupts.
#define RCUTORTURE_RDR_PREEMPT	0x04	//  ... disabling preemption.
#define RCUTORTURE_RDR_RBH	0x08	//  ... rcu_read_lock_bh().
#define RCUTORTURE_RDR_SCHED	0x10	//  ... rcu_read_lock_sched().
#define RCUTORTURE_RDR_RCU_1	0x20	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_RCU_2	0x40	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_UPDOWN	0x80	//  ... up-read from task, down-read from timer.
					// Note: Manual start, automatic end.
#define RCUTORTURE_RDR_NBITS	8	// Number of bits defined above.
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)	// Intentionally omit RCUTORTURE_RDR_UPDOWN.
#define RCUTORTURE_RDR_ALLBITS	\
	(RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
	 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
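/*
 * Worked example (illustrative): the reader-extension code later in this
 * file builds a reader-segment state by ORing together the protection bits
 * above, so a segment protected by both local_bh_disable() and
 * preempt_disable() has state RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT
 * (0x05).  The values returned by ->readlock() for the first and second
 * nested readers are stashed in the RCUTORTURE_RDR_MASK_1 and
 * RCUTORTURE_RDR_MASK_2 byte fields, and a randomly extended read-side
 * critical section consists of at most RCUTORTURE_RDR_MAX_SEGS segments.
 */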

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
torture_param(int, gp_cond_wi_exp, 128,
	      "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal polled grace periods, us (default 16 jiffies)");
torture_param(int, gp_poll_wi_exp, 128,
	      "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static int nrealfakewriters;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *updown_task;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
static struct task_struct *preempt_task;

#define RCU_TORTURE_PIPE_LEN 10

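/*
 * Usage sketch (illustrative only, not part of the original source): the
 * declarations above are ordinary module parameters, so a typical modular
 * run might be started with something like
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=15
 *
 * or, for a built-in rcutorture, with boot parameters such as
 * "rcutorture.torture_type=rcu rcutorture.fqs_duration=100".
 */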
// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

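/*
 * Note: rcu_torture_writer_state_names[] above must stay in index
 * correspondence with the RTWS_* definitions; the helper
 * rcu_torture_writer_state_getname() below maps the current writer state
 * to its name and returns "???" for out-of-range values.
 */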
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
	int rt_cpu;
	int rt_end_cpu;
	unsigned long long rt_gp_seq;
	unsigned long long rt_gp_seq_end;
	u64 rt_ts;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static int rt_read_preempted;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);	// lockdep.
	int (*readlock_nesting)(void);	// actual nesting, if available, -1 if not.
	int (*down_read)(void);
	void (*up_read)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	int poll_active;
	int poll_active_full;
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	bool (*reader_blocked)(void);
	unsigned long long (*gather_gp_seqs)(void);
	void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
	void (*set_gpwrap_lag)(unsigned long lag);
	int (*get_gpwrap_count)(int cpu);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	int start_poll_irqsoff;
	int have_up_down;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if ((preempt_count() & HARDIRQ_MASK) || softirq_count())
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule();  /* QS only if preemptible. */
}
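/*
 * Back-of-the-envelope example (illustrative only): with the default
 * longdelay_ms of 300 and, say, nrealreaders = 4, the long mdelay() above
 * fires roughly once per 4 * 2000 * 300 = 2,400,000 calls, while the short
 * udelay() fires about once per 4 * 2 * 200 = 1,600 calls.  Long delays are
 * therefore rare enough not to wreck forward progress, yet frequent enough
 * to occasionally stretch a reader across a grace period.
 */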

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

static int rcu_torture_readlock_nesting(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
		return rcu_preempt_depth();
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return (preempt_count() & PREEMPT_MASK);
	return -1;
}

/*
 * Update callback in the pipe. This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe. Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

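/*
 * Note: rcu_torture_pipe_update() above is used on the synchronous
 * grace-period paths, while the asynchronous paths advance elements one
 * grace period at a time from rcu_torture_cb() below.  Both funnel through
 * rcu_torture_pipe_update_one(), which frees an element only after it has
 * passed through all RCU_TORTURE_PIPE_LEN stages of the pipeline.
 */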
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.readlock_nesting = rcu_torture_readlock_nesting,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
	.poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.get_gp_data = rcutorture_get_gp_data,
	.gp_slow_register = rcu_gp_slow_register,
	.gp_slow_unregister = rcu_gp_slow_unregister,
	.reader_blocked = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
			  ? has_rcu_reader_blocked
			  : NULL,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.set_gpwrap_lag = rcu_set_gpwrap_lag,
	.get_gpwrap_count = rcu_get_gpwrap_count,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.debug_objects = 1,
	.start_poll_irqsoff = 1,
	.name = "rcu"
};

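/*
 * Summary note: each rcu_torture_ops instance such as rcu_ops above
 * describes one RCU flavor to be tortured.  The cur_ops pointer is
 * selected at initialization time based on the torture_type module
 * parameter, and the rest of this file invokes flavor-specific primitives
 * only through cur_ops, so optional hooks left NULL simply disable the
 * corresponding tests.
 */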
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

static int srcu_torture_read_lock(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;
	int ret = 0;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_read_lock(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		idx = srcu_read_lock_nmisafe(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 1;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		scp = srcu_read_lock_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 3;
	}
	return ret;
}

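/*
 * Note on the return value of srcu_torture_read_lock() above: the SRCU
 * read-side indexes for the flavors in use are packed into a single int,
 * with bit 0 holding the srcu_read_lock() index, bit 1 the nmi-safe index,
 * and bit 3 the srcu_read_lock_fast() index.  srcu_torture_read_unlock()
 * below unpacks the same bits, so the two functions must stay in sync.
 */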
static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST)
		srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_read_unlock(srcu_ctlp, idx & 0x1);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static bool srcu_torture_have_up_down(void)
{
	int rf = reader_flavor;

	if (!rf)
		rf = SRCU_READ_FLAVOR_NORMAL;
	return !!(cur_ops->have_up_down & rf);
}

static int srcu_torture_down_read(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);
	WARN_ON_ONCE(reader_flavor & (reader_flavor - 1));

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_down_read(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		scp = srcu_down_read_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx << 3;
	}
	WARN_ON_ONCE(1);
	return 0;
}

static void srcu_torture_up_read(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST)
		srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	else if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) ||
		 !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_up_read(srcu_ctlp, idx & 0x1);
	else
		WARN_ON_ONCE(1);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.down_read = srcu_torture_down_read,
	.up_read = srcu_torture_up_read,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
			? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST,
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.down_read = srcu_torture_down_read,
	.up_read = srcu_torture_up_read,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
			? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST,
	.name = "srcud"
};

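/*
 * Note: srcu_ops exercises the statically allocated srcu_ctl defined via
 * DEFINE_STATIC_SRCU() above, whereas srcud_ops points srcu_ctlp at the
 * dynamically initialized srcu_ctld in srcu_torture_init() and tears it
 * down again in srcu_torture_cleanup().  Everything else is shared, so the
 * two flavors differ only in how the srcu_struct is allocated.
 */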
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not work well with CPU hotplug nor
 * with rcutorture's shuffling.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static void rcu_sync_torture_init_trivial(void)
{
	rcu_sync_torture_init();
	// if (onoff_interval || shuffle_interval) {
	if (WARN_ONCE(onoff_interval || shuffle_interval, "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero", __func__, onoff_interval, shuffle_interval)) {
		onoff_interval = 0;
		shuffle_interval = 0;
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init_trivial,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.irq_capable = 1,
	.name = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

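/*
 * Note: synchronize_rcu_mult_test() above uses synchronize_rcu_mult() to
 * wait concurrently for both an RCU Tasks grace period and a regular RCU
 * grace period, which is how the "tasks" flavor exercises its ->exp_sync
 * hook below.
 */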
static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.get_gp_data = rcu_tasks_get_gp_data,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.get_gp_data = rcu_tasks_rude_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.get_gp_data = rcu_tasks_trace_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing. Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete. If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) &&
			    cmpxchg(&last_persist, lp, j) == lp) {
				if (cpu < 0)
					pr_info("Boost inversion persisted: QS from all CPUs\n");
				else
					pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			}
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;
	unsigned long booststarttime = get_torture_init_jiffies() + test_boost_holdoff * HZ;

	if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started");
	} else {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period");
		while (time_before(jiffies, booststarttime)) {
			schedule_timeout_idle(HZ);
			if (kthread_should_stop())
				goto cleanup;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period");
	}

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval. Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

cleanup:
	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

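/*
 * Summary note: rcu_torture_boost() above repeatedly starts polled grace
 * periods while running at real-time priority, and rcu_torture_boost_failed()
 * declares a boost failure when a grace period remains pending for more than
 * test_boost_duration * HZ - HZ / 2 jiffies without the flavor-specific
 * check_boost_failed() hook excusing it.
 */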
/*
 * RCU torture force-quiescent-state kthread. Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

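/*
 * Note on the burst timing above: each burst lasts roughly fqs_duration
 * microseconds, with one cur_ops->fqs() call every fqs_holdoff microseconds,
 * and successive bursts are separated by fqs_stutter seconds.  All three
 * values come from the module parameters near the top of this file.
 */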
// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array. If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
	pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n", __func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp);
}

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs. Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}

/*
 * RCU torture writer kthread. Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate *rgo = NULL;
	int rgo_size = 0;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	unsigned long stallsdone = jiffies;
	bool stutter_waited;
	unsigned long *ulo = NULL;
	int ulo_size = 0;

	// If a new stall test is added, this must be adjusted.
	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
			      HZ * (stall_cpu_repeat + 1);
	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}
	if (cur_ops->poll_active > 0) {
		ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
		if (!WARN_ON(!ulo))
			ulo_size = cur_ops->poll_active;
	}
	if (cur_ops->poll_active_full > 0) {
		rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
		if (!WARN_ON(!rgo))
			rgo_size = cur_ops->poll_active_full;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);

			// Make sure readers block polled grace periods.
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				if (cur_ops->get_comp_state) {
					cookie = cur_ops->get_comp_state();
					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
				}
				cur_ops->readunlock(idx);
			}
			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
				idx = cur_ops->readlock();
				cur_ops->get_gp_state_full(&cookie_full);
				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cpumask_pr_args(cpu_online_mask));
				if (cur_ops->get_comp_state_full) {
					cur_ops->get_comp_state_full(&cookie_full);
					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
				}
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				do_rtws_sync(&rand, cur_ops->exp_sync);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
						     1000, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP:
				rcu_torture_writer_state = RTWS_COND_GET_EXP;
				gp_snap = cur_ops->get_gp_state_exp();
				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
						     1000, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
cur_ops->cond_sync_exp(gp_snap); 1666 rcu_torture_pipe_update(old_rp); 1667 break; 1668 case RTWS_COND_GET_FULL: 1669 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1670 cur_ops->get_gp_state_full(&gp_snap_full); 1671 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi, 1672 1000, &rand); 1673 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1674 cur_ops->cond_sync_full(&gp_snap_full); 1675 rcu_torture_pipe_update(old_rp); 1676 break; 1677 case RTWS_COND_GET_EXP_FULL: 1678 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1679 cur_ops->get_gp_state_full(&gp_snap_full); 1680 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp, 1681 1000, &rand); 1682 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1683 cur_ops->cond_sync_exp_full(&gp_snap_full); 1684 rcu_torture_pipe_update(old_rp); 1685 break; 1686 case RTWS_POLL_GET: 1687 rcu_torture_writer_state = RTWS_POLL_GET; 1688 for (i = 0; i < ulo_size; i++) 1689 ulo[i] = cur_ops->get_comp_state(); 1690 gp_snap = cur_ops->start_gp_poll(); 1691 rcu_torture_writer_state = RTWS_POLL_WAIT; 1692 while (!cur_ops->poll_gp_state(gp_snap)) { 1693 gp_snap1 = cur_ops->get_gp_state(); 1694 for (i = 0; i < ulo_size; i++) 1695 if (cur_ops->poll_gp_state(ulo[i]) || 1696 cur_ops->same_gp_state(ulo[i], gp_snap1)) { 1697 ulo[i] = gp_snap1; 1698 break; 1699 } 1700 WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size); 1701 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1702 1000, &rand); 1703 } 1704 rcu_torture_pipe_update(old_rp); 1705 break; 1706 case RTWS_POLL_GET_FULL: 1707 rcu_torture_writer_state = RTWS_POLL_GET_FULL; 1708 for (i = 0; i < rgo_size; i++) 1709 cur_ops->get_comp_state_full(&rgo[i]); 1710 cur_ops->start_gp_poll_full(&gp_snap_full); 1711 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1712 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1713 cur_ops->get_gp_state_full(&gp_snap1_full); 1714 for (i = 0; i < rgo_size; i++) 1715 if (cur_ops->poll_gp_state_full(&rgo[i]) || 1716 cur_ops->same_gp_state_full(&rgo[i], 1717 &gp_snap1_full)) { 1718 rgo[i] = gp_snap1_full; 1719 break; 1720 } 1721 WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size); 1722 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi, 1723 1000, &rand); 1724 } 1725 rcu_torture_pipe_update(old_rp); 1726 break; 1727 case RTWS_POLL_GET_EXP: 1728 rcu_torture_writer_state = RTWS_POLL_GET_EXP; 1729 gp_snap = cur_ops->start_gp_poll_exp(); 1730 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; 1731 while (!cur_ops->poll_gp_state_exp(gp_snap)) 1732 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1733 1000, &rand); 1734 rcu_torture_pipe_update(old_rp); 1735 break; 1736 case RTWS_POLL_GET_EXP_FULL: 1737 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; 1738 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1739 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; 1740 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1741 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp, 1742 1000, &rand); 1743 rcu_torture_pipe_update(old_rp); 1744 break; 1745 case RTWS_SYNC: 1746 rcu_torture_writer_state = RTWS_SYNC; 1747 do_rtws_sync(&rand, cur_ops->sync); 1748 rcu_torture_pipe_update(old_rp); 1749 break; 1750 default: 1751 WARN_ON_ONCE(1); 1752 break; 1753 } 1754 } 1755 WRITE_ONCE(rcu_torture_current_version, 1756 rcu_torture_current_version + 1); 1757 /* Cycle through nesting levels of rcu_expedite_gp() calls. 
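   A hedged summary of the logic below: roughly once every 256 passes a
   new cycle starts, after which each pass either adds one more level of
   rcu_expedite_gp() nesting (while expediting >= 0) or peels one level
   back off with rcu_unexpedite_gp() (once expediting has gone
   negative), so both interfaces get exercised at several nesting
   depths.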
*/ 1758 if (can_expedite && 1759 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1760 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1761 if (expediting >= 0) 1762 rcu_expedite_gp(); 1763 else 1764 rcu_unexpedite_gp(); 1765 if (++expediting > 3) 1766 expediting = -expediting; 1767 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1768 can_expedite = !rcu_gp_is_expedited() && 1769 !rcu_gp_is_normal(); 1770 } 1771 rcu_torture_writer_state = RTWS_STUTTER; 1772 boot_ended = rcu_inkernel_boot_has_ended(); 1773 stutter_waited = stutter_wait("rcu_torture_writer"); 1774 if (stutter_waited && 1775 !atomic_read(&rcu_fwd_cb_nodelay) && 1776 !cur_ops->slow_gps && 1777 !torture_must_stop() && 1778 boot_ended && 1779 time_after(jiffies, stallsdone)) 1780 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1781 if (list_empty(&rcu_tortures[i].rtort_free) && 1782 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { 1783 tracing_off(); 1784 if (cur_ops->gp_kthread_dbg) 1785 cur_ops->gp_kthread_dbg(); 1786 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1787 rcu_ftrace_dump(DUMP_ALL); 1788 break; 1789 } 1790 if (stutter_waited) 1791 sched_set_normal(current, oldnice); 1792 } while (!torture_must_stop()); 1793 rcu_torture_current = NULL; // Let stats task know that we are done. 1794 /* Reset expediting back to unexpedited. */ 1795 if (expediting > 0) 1796 expediting = -expediting; 1797 while (can_expedite && expediting++ < 0) 1798 rcu_unexpedite_gp(); 1799 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1800 if (!can_expedite) 1801 pr_alert("%s" TORTURE_FLAG 1802 " Dynamic grace-period expediting was disabled.\n", 1803 torture_type); 1804 kfree(ulo); 1805 kfree(rgo); 1806 rcu_torture_writer_state = RTWS_STOPPING; 1807 torture_kthread_stopping("rcu_torture_writer"); 1808 return 0; 1809 } 1810 1811 /* 1812 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1813 * delay between calls. 1814 */ 1815 static int 1816 rcu_torture_fakewriter(void *arg) 1817 { 1818 unsigned long gp_snap; 1819 struct rcu_gp_oldstate gp_snap_full; 1820 DEFINE_TORTURE_RANDOM(rand); 1821 1822 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1823 set_user_nice(current, MAX_NICE); 1824 1825 if (WARN_ONCE(nsynctypes == 0, 1826 "%s: No update-side primitives.\n", __func__)) { 1827 /* 1828 * No updates primitives, so don't try updating. 1829 * The resulting test won't be testing much, hence the 1830 * above WARN_ONCE(). 
1831 */ 1832 torture_kthread_stopping("rcu_torture_fakewriter"); 1833 return 0; 1834 } 1835 1836 do { 1837 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1838 if (cur_ops->cb_barrier != NULL && 1839 torture_random(&rand) % (nrealfakewriters * 8) == 0) { 1840 cur_ops->cb_barrier(); 1841 } else { 1842 switch (synctype[torture_random(&rand) % nsynctypes]) { 1843 case RTWS_DEF_FREE: 1844 break; 1845 case RTWS_EXP_SYNC: 1846 cur_ops->exp_sync(); 1847 break; 1848 case RTWS_COND_GET: 1849 gp_snap = cur_ops->get_gp_state(); 1850 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1851 cur_ops->cond_sync(gp_snap); 1852 break; 1853 case RTWS_COND_GET_EXP: 1854 gp_snap = cur_ops->get_gp_state_exp(); 1855 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1856 cur_ops->cond_sync_exp(gp_snap); 1857 break; 1858 case RTWS_COND_GET_FULL: 1859 cur_ops->get_gp_state_full(&gp_snap_full); 1860 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1861 cur_ops->cond_sync_full(&gp_snap_full); 1862 break; 1863 case RTWS_COND_GET_EXP_FULL: 1864 cur_ops->get_gp_state_full(&gp_snap_full); 1865 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1866 cur_ops->cond_sync_exp_full(&gp_snap_full); 1867 break; 1868 case RTWS_POLL_GET: 1869 if (cur_ops->start_poll_irqsoff) 1870 local_irq_disable(); 1871 gp_snap = cur_ops->start_gp_poll(); 1872 if (cur_ops->start_poll_irqsoff) 1873 local_irq_enable(); 1874 while (!cur_ops->poll_gp_state(gp_snap)) { 1875 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1876 &rand); 1877 } 1878 break; 1879 case RTWS_POLL_GET_FULL: 1880 if (cur_ops->start_poll_irqsoff) 1881 local_irq_disable(); 1882 cur_ops->start_gp_poll_full(&gp_snap_full); 1883 if (cur_ops->start_poll_irqsoff) 1884 local_irq_enable(); 1885 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1886 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1887 &rand); 1888 } 1889 break; 1890 case RTWS_POLL_GET_EXP: 1891 gp_snap = cur_ops->start_gp_poll_exp(); 1892 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1893 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1894 &rand); 1895 } 1896 break; 1897 case RTWS_POLL_GET_EXP_FULL: 1898 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1899 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1900 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1901 &rand); 1902 } 1903 break; 1904 case RTWS_SYNC: 1905 cur_ops->sync(); 1906 break; 1907 default: 1908 WARN_ON_ONCE(1); 1909 break; 1910 } 1911 } 1912 stutter_wait("rcu_torture_fakewriter"); 1913 } while (!torture_must_stop()); 1914 1915 torture_kthread_stopping("rcu_torture_fakewriter"); 1916 return 0; 1917 } 1918 1919 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1920 { 1921 kfree(rhp); 1922 } 1923 1924 // Set up and carry out testing of RCU's global memory ordering 1925 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1926 struct torture_random_state *trsp) 1927 { 1928 unsigned long loops; 1929 int noc = torture_num_online_cpus(); 1930 int rdrchked; 1931 int rdrchker; 1932 struct rcu_torture_reader_check *rtrcp; // Me. 1933 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1934 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1935 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1936 1937 if (myid < 0) 1938 return; // Don't try this from timer handlers. 1939 1940 // Increment my counter. 
1941 rtrcp = &rcu_torture_reader_mbchk[myid]; 1942 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1943 1944 // Attempt to assign someone else some checking work. 1945 rdrchked = torture_random(trsp) % nrealreaders; 1946 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1947 rdrchker = torture_random(trsp) % nrealreaders; 1948 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1949 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1950 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1951 !READ_ONCE(rtp->rtort_chkp) && 1952 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1953 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1954 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1955 rtrcp->rtc_chkrdr = rdrchked; 1956 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1957 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1958 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1959 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1960 } 1961 1962 // If assigned some completed work, do it! 1963 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1964 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1965 return; // No work or work not yet ready. 1966 rdrchked = rtrcp_assigner->rtc_chkrdr; 1967 if (WARN_ON_ONCE(rdrchked < 0)) 1968 return; 1969 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1970 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1971 atomic_inc(&n_rcu_torture_mbchk_tries); 1972 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1973 atomic_inc(&n_rcu_torture_mbchk_fail); 1974 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1975 rtrcp_assigner->rtc_ready = 0; 1976 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1977 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1978 } 1979 1980 // Verify the specified RCUTORTURE_RDR* state. 1981 #define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count() 1982 static void rcutorture_one_extend_check(char *s, int curstate, int new, int old) 1983 { 1984 int mask; 1985 1986 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE) || in_nmi()) 1987 return; 1988 1989 WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled() && !in_hardirq(), ROEC_ARGS); 1990 WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS); 1991 1992 // If CONFIG_PREEMPT_COUNT=n, further checks are unreliable. 1993 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 1994 return; 1995 1996 WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 1997 !softirq_count(), ROEC_ARGS); 1998 WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) && 1999 !(preempt_count() & PREEMPT_MASK), ROEC_ARGS); 2000 WARN_ONCE(cur_ops->readlock_nesting && 2001 (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) && 2002 cur_ops->readlock_nesting() == 0, ROEC_ARGS); 2003 2004 // Interrupt handlers have all sorts of stuff disabled, so ignore 2005 // unintended disabling. 2006 if (in_serving_softirq() || in_hardirq()) 2007 return; 2008 2009 WARN_ONCE(cur_ops->extendables && 2010 !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) && 2011 softirq_count(), ROEC_ARGS); 2012 2013 /* 2014 * non-preemptible RCU in a preemptible kernel uses preempt_disable() 2015 * as rcu_read_lock(). 
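 * Consequently, with CONFIG_PREEMPT_RCU=n an RCU reader also shows up
 * in preempt_count(), so the check below must accept the RCU_1/RCU_2
 * bits as legitimate reasons for a nonzero PREEMPT_MASK.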
2016 */ 2017 mask = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2018 if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) 2019 mask |= RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2020 2021 WARN_ONCE(cur_ops->extendables && !(curstate & mask) && 2022 (preempt_count() & PREEMPT_MASK), ROEC_ARGS); 2023 2024 /* 2025 * non-preemptible RCU in a preemptible kernel uses "preempt_count() & 2026 * PREEMPT_MASK" as ->readlock_nesting(). 2027 */ 2028 mask = RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2029 if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) 2030 mask |= RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2031 2032 if (IS_ENABLED(CONFIG_PREEMPT_RT) && softirq_count()) 2033 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 2034 2035 WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) && 2036 cur_ops->readlock_nesting() > 0, ROEC_ARGS); 2037 } 2038 2039 /* 2040 * Do one extension of an RCU read-side critical section using the 2041 * current reader state in readstate (set to zero for initial entry 2042 * to extended critical section), set the new state as specified by 2043 * newstate (set to zero for final exit from extended critical section), 2044 * and random-number-generator state in trsp. If this is neither the 2045 * beginning or end of the critical section and if there was actually a 2046 * change, do a ->read_delay(). 2047 */ 2048 static void rcutorture_one_extend(int *readstate, int newstate, struct torture_random_state *trsp, 2049 struct rt_read_seg *rtrsp) 2050 { 2051 bool first; 2052 unsigned long flags; 2053 int idxnew1 = -1; 2054 int idxnew2 = -1; 2055 int idxold1 = *readstate; 2056 int idxold2 = idxold1; 2057 int statesnew = ~*readstate & newstate; 2058 int statesold = *readstate & ~newstate; 2059 2060 first = idxold1 == 0; 2061 WARN_ON_ONCE(idxold2 < 0); 2062 WARN_ON_ONCE(idxold2 & ~(RCUTORTURE_RDR_ALLBITS | RCUTORTURE_RDR_UPDOWN)); 2063 rcutorture_one_extend_check("before change", idxold1, statesnew, statesold); 2064 rtrsp->rt_readstate = newstate; 2065 2066 /* First, put new protection in place to avoid critical-section gap. */ 2067 if (statesnew & RCUTORTURE_RDR_BH) 2068 local_bh_disable(); 2069 if (statesnew & RCUTORTURE_RDR_RBH) 2070 rcu_read_lock_bh(); 2071 if (statesnew & RCUTORTURE_RDR_IRQ) 2072 local_irq_disable(); 2073 if (statesnew & RCUTORTURE_RDR_PREEMPT) 2074 preempt_disable(); 2075 if (statesnew & RCUTORTURE_RDR_SCHED) 2076 rcu_read_lock_sched(); 2077 if (statesnew & RCUTORTURE_RDR_RCU_1) 2078 idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; 2079 if (statesnew & RCUTORTURE_RDR_RCU_2) 2080 idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2; 2081 2082 // Complain unless both the old and the new protection is in place. 2083 rcutorture_one_extend_check("during change", idxold1 | statesnew, statesnew, statesold); 2084 2085 // Sample CPU under both sets of protections to reduce confusion. 2086 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 2087 int cpu = raw_smp_processor_id(); 2088 rtrsp->rt_cpu = cpu; 2089 if (!first) { 2090 rtrsp[-1].rt_end_cpu = cpu; 2091 if (cur_ops->reader_blocked) 2092 rtrsp[-1].rt_preempted = cur_ops->reader_blocked(); 2093 } 2094 } 2095 // Sample grace-period sequence number, as good a place as any. 
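// (With CONFIG_RCU_TORTURE_TEST_LOG_GP=y, each reader segment thus
// records the grace-period sequence numbers bracketing it, which helps
// when diagnosing apparently too-short grace periods.)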
2096 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && cur_ops->gather_gp_seqs) { 2097 rtrsp->rt_gp_seq = cur_ops->gather_gp_seqs(); 2098 rtrsp->rt_ts = ktime_get_mono_fast_ns(); 2099 if (!first) 2100 rtrsp[-1].rt_gp_seq_end = rtrsp->rt_gp_seq; 2101 } 2102 2103 /* 2104 * Next, remove old protection, in decreasing order of strength 2105 * to avoid unlock paths that aren't safe in the stronger 2106 * context. Namely: BH can not be enabled with disabled interrupts. 2107 * Additionally PREEMPT_RT requires that BH is enabled in preemptible 2108 * context. 2109 */ 2110 if (statesold & RCUTORTURE_RDR_IRQ) 2111 local_irq_enable(); 2112 if (statesold & RCUTORTURE_RDR_PREEMPT) 2113 preempt_enable(); 2114 if (statesold & RCUTORTURE_RDR_SCHED) 2115 rcu_read_unlock_sched(); 2116 if (statesold & RCUTORTURE_RDR_BH) 2117 local_bh_enable(); 2118 if (statesold & RCUTORTURE_RDR_RBH) 2119 rcu_read_unlock_bh(); 2120 if (statesold & RCUTORTURE_RDR_RCU_2) { 2121 cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2); 2122 WARN_ON_ONCE(idxnew2 != -1); 2123 idxold2 = 0; 2124 } 2125 if (statesold & RCUTORTURE_RDR_RCU_1) { 2126 bool lockit; 2127 2128 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 2129 if (lockit) 2130 raw_spin_lock_irqsave(&current->pi_lock, flags); 2131 cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); 2132 WARN_ON_ONCE(idxnew1 != -1); 2133 idxold1 = 0; 2134 if (lockit) 2135 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 2136 } 2137 if (statesold & RCUTORTURE_RDR_UPDOWN) { 2138 cur_ops->up_read((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1); 2139 WARN_ON_ONCE(idxnew1 != -1); 2140 idxold1 = 0; 2141 } 2142 2143 /* Delay if neither beginning nor end and there was a change. */ 2144 if ((statesnew || statesold) && *readstate && newstate) 2145 cur_ops->read_delay(trsp, rtrsp); 2146 2147 /* Update the reader state. */ 2148 if (idxnew1 == -1) 2149 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 2150 WARN_ON_ONCE(idxnew1 < 0); 2151 if (idxnew2 == -1) 2152 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 2153 WARN_ON_ONCE(idxnew2 < 0); 2154 *readstate = idxnew1 | idxnew2 | newstate; 2155 WARN_ON_ONCE(*readstate < 0); 2156 if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS)) 2157 pr_info("Unexpected readstate value of %#x\n", *readstate); 2158 rcutorture_one_extend_check("after change", *readstate, statesnew, statesold); 2159 } 2160 2161 /* Return the biggest extendables mask given current RCU and boot parameters. */ 2162 static int rcutorture_extend_mask_max(void) 2163 { 2164 int mask; 2165 2166 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 2167 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 2168 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 2169 return mask; 2170 } 2171 2172 /* Return a random protection state mask, but with at least one bit set. */ 2173 static int 2174 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 2175 { 2176 int mask = rcutorture_extend_mask_max(); 2177 unsigned long randmask1 = torture_random(trsp); 2178 unsigned long randmask2 = randmask1 >> 3; 2179 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 2180 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 2181 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 2182 2183 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits. 2184 /* Mostly only one bit (need preemption!), sometimes lots of bits.
*/ 2185 if (!(randmask1 & 0x7)) 2186 mask = mask & randmask2; 2187 else 2188 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 2189 2190 // Can't have nested RCU reader without outer RCU reader. 2191 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 2192 if (oldmask & RCUTORTURE_RDR_RCU_1) 2193 mask &= ~RCUTORTURE_RDR_RCU_2; 2194 else 2195 mask |= RCUTORTURE_RDR_RCU_1; 2196 } 2197 2198 /* 2199 * Can't enable bh w/irq disabled. 2200 */ 2201 if (mask & RCUTORTURE_RDR_IRQ) 2202 mask |= oldmask & bhs; 2203 2204 /* 2205 * Ideally these sequences would be detected in debug builds 2206 * (regardless of RT), but until then don't stop testing 2207 * them on non-RT. 2208 */ 2209 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 2210 /* Can't modify BH in atomic context */ 2211 if (oldmask & preempts_irq) 2212 mask &= ~bhs; 2213 if ((oldmask | mask) & preempts_irq) 2214 mask |= oldmask & bhs; 2215 } 2216 2217 return mask ?: RCUTORTURE_RDR_RCU_1; 2218 } 2219 2220 /* 2221 * Do a randomly selected number of extensions of an existing RCU read-side 2222 * critical section. 2223 */ 2224 static struct rt_read_seg * 2225 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, struct rt_read_seg *rtrsp) 2226 { 2227 int i; 2228 int j; 2229 int mask = rcutorture_extend_mask_max(); 2230 2231 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 2232 if (!((mask - 1) & mask)) 2233 return rtrsp; /* Current RCU reader not extendable. */ 2234 /* Bias towards larger numbers of loops. */ 2235 i = torture_random(trsp); 2236 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 2237 for (j = 0; j < i; j++) { 2238 mask = rcutorture_extend_mask(*readstate, trsp); 2239 WARN_ON_ONCE(mask & RCUTORTURE_RDR_UPDOWN); 2240 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 2241 } 2242 return &rtrsp[j]; 2243 } 2244 2245 struct rcu_torture_one_read_state { 2246 bool checkpolling; 2247 unsigned long cookie; 2248 struct rcu_gp_oldstate cookie_full; 2249 unsigned long started; 2250 struct rcu_torture *p; 2251 int readstate; 2252 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS]; 2253 struct rt_read_seg *rtrsp; 2254 unsigned long long ts; 2255 }; 2256 2257 static void init_rcu_torture_one_read_state(struct rcu_torture_one_read_state *rtorsp, 2258 struct torture_random_state *trsp) 2259 { 2260 memset(rtorsp, 0, sizeof(*rtorsp)); 2261 rtorsp->checkpolling = !(torture_random(trsp) & 0xfff); 2262 rtorsp->rtrsp = &rtorsp->rtseg[0]; 2263 } 2264 2265 /* 2266 * Set up the first segment of a series of overlapping read-side 2267 * critical sections. The caller must have actually initiated the 2268 * outermost read-side critical section. 
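 *
 * Returns false if rcu_torture_current has not yet been set, that is,
 * if the writer has not yet gotten going; in that case the reader
 * protections are dropped here and the caller should simply retry
 * later.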
2269 */ 2270 static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp, 2271 struct torture_random_state *trsp, long myid) 2272 { 2273 if (rtorsp->checkpolling) { 2274 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2275 rtorsp->cookie = cur_ops->get_gp_state(); 2276 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2277 cur_ops->get_gp_state_full(&rtorsp->cookie_full); 2278 } 2279 rtorsp->started = cur_ops->get_gp_seq(); 2280 rtorsp->ts = rcu_trace_clock_local(); 2281 rtorsp->p = rcu_dereference_check(rcu_torture_current, 2282 !cur_ops->readlock_held || cur_ops->readlock_held() || 2283 (rtorsp->readstate & RCUTORTURE_RDR_UPDOWN)); 2284 if (rtorsp->p == NULL) { 2285 /* Wait for rcu_torture_writer to get underway */ 2286 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp); 2287 return false; 2288 } 2289 if (rtorsp->p->rtort_mbtest == 0) 2290 atomic_inc(&n_rcu_torture_mberror); 2291 rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp); 2292 return true; 2293 } 2294 2295 /* 2296 * Complete the last segment of a series of overlapping read-side 2297 * critical sections and check for errors. 2298 */ 2299 static void rcu_torture_one_read_end(struct rcu_torture_one_read_state *rtorsp, 2300 struct torture_random_state *trsp) 2301 { 2302 int i; 2303 unsigned long completed; 2304 int pipe_count; 2305 bool preempted = false; 2306 struct rt_read_seg *rtrsp1; 2307 2308 preempt_disable(); 2309 pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count); 2310 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 2311 // Should not happen in a correct RCU implementation, 2312 // happens quite often for torture_type=busted. 2313 pipe_count = RCU_TORTURE_PIPE_LEN; 2314 } 2315 completed = cur_ops->get_gp_seq(); 2316 if (pipe_count > 1) { 2317 do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu, 2318 rtorsp->ts, rtorsp->started, completed); 2319 rcu_ftrace_dump(DUMP_ALL); 2320 } 2321 __this_cpu_inc(rcu_torture_count[pipe_count]); 2322 completed = rcutorture_seq_diff(completed, rtorsp->started); 2323 if (completed > RCU_TORTURE_PIPE_LEN) { 2324 /* Should not happen, but... */ 2325 completed = RCU_TORTURE_PIPE_LEN; 2326 } 2327 __this_cpu_inc(rcu_torture_batch[completed]); 2328 preempt_enable(); 2329 if (rtorsp->checkpolling) { 2330 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2331 WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie), 2332 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2333 __func__, 2334 rcu_torture_writer_state_getname(), 2335 rcu_torture_writer_state, 2336 rtorsp->cookie, cur_ops->get_gp_state()); 2337 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2338 WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full), 2339 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2340 __func__, 2341 rcu_torture_writer_state_getname(), 2342 rcu_torture_writer_state, 2343 cpumask_pr_args(cpu_online_mask)); 2344 } 2345 if (cur_ops->reader_blocked) 2346 preempted = cur_ops->reader_blocked(); 2347 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp); 2348 WARN_ON_ONCE(rtorsp->readstate); 2349 // This next splat is expected behavior if leakpointer, especially 2350 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2351 WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1); 2352 2353 /* If error or close call, record the sequence of reader protections. 
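   Only the first such event is captured (note the xchg() of
   err_segs_recorded); the saved err_segs[] array is reported when the
   test is torn down.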
*/ 2354 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2355 i = 0; 2356 for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++) 2357 err_segs[i++] = *rtrsp1; 2358 rt_read_nsegs = i; 2359 rt_read_preempted = preempted; 2360 } 2361 } 2362 2363 /* 2364 * Do one read-side critical section, returning false if there was 2365 * no data to read. Can be invoked both from process context and 2366 * from a timer handler. 2367 */ 2368 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 2369 { 2370 int newstate; 2371 struct rcu_torture_one_read_state rtors; 2372 2373 WARN_ON_ONCE(!rcu_is_watching()); 2374 init_rcu_torture_one_read_state(&rtors, trsp); 2375 newstate = rcutorture_extend_mask(rtors.readstate, trsp); 2376 WARN_ON_ONCE(newstate & RCUTORTURE_RDR_UPDOWN); 2377 rcutorture_one_extend(&rtors.readstate, newstate, trsp, rtors.rtrsp++); 2378 if (!rcu_torture_one_read_start(&rtors, trsp, myid)) { 2379 rcutorture_one_extend(&rtors.readstate, 0, trsp, rtors.rtrsp); 2380 return false; 2381 } 2382 rtors.rtrsp = rcutorture_loop_extend(&rtors.readstate, trsp, rtors.rtrsp); 2383 rcu_torture_one_read_end(&rtors, trsp); 2384 return true; 2385 } 2386 2387 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2388 2389 /* 2390 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2391 * incrementing the corresponding element of the pipeline array. The 2392 * counter in the element should never be greater than 1, otherwise, the 2393 * RCU implementation is broken. 2394 */ 2395 static void rcu_torture_timer(struct timer_list *unused) 2396 { 2397 atomic_long_inc(&n_rcu_torture_timers); 2398 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2399 2400 /* Test call_rcu() invocation from interrupt handler. */ 2401 if (cur_ops->call) { 2402 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2403 2404 if (rhp) 2405 cur_ops->call(rhp, rcu_torture_timer_cb); 2406 } 2407 } 2408 2409 /* 2410 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2411 * incrementing the corresponding element of the pipeline array. The 2412 * counter in the element should never be greater than 1, otherwise, the 2413 * RCU implementation is broken. 2414 */ 2415 static int 2416 rcu_torture_reader(void *arg) 2417 { 2418 unsigned long lastsleep = jiffies; 2419 long myid = (long)arg; 2420 int mynumonline = myid; 2421 DEFINE_TORTURE_RANDOM(rand); 2422 struct timer_list t; 2423 2424 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2425 set_user_nice(current, MAX_NICE); 2426 if (irqreader && cur_ops->irq_capable) 2427 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2428 tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. 
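	/*
	 * Hedged overview of the loop below: each pass optionally re-arms
	 * a one-jiffy timer so that rcu_torture_timer() also runs readers
	 * from irq context, performs one read-side critical section,
	 * sleeps briefly when there is nothing to read or when this task
	 * has been running for a while, throttles itself while fewer CPUs
	 * are online than this reader's index, and honors stutter_wait().
	 */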
2429 do { 2430 if (irqreader && cur_ops->irq_capable) { 2431 if (!timer_pending(&t)) 2432 mod_timer(&t, jiffies + 1); 2433 } 2434 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2435 schedule_timeout_interruptible(HZ); 2436 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2437 torture_hrtimeout_us(500, 1000, &rand); 2438 lastsleep = jiffies + 10; 2439 } 2440 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2441 schedule_timeout_interruptible(HZ / 5); 2442 stutter_wait("rcu_torture_reader"); 2443 } while (!torture_must_stop()); 2444 if (irqreader && cur_ops->irq_capable) { 2445 timer_delete_sync(&t); 2446 timer_destroy_on_stack(&t); 2447 } 2448 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2449 torture_kthread_stopping("rcu_torture_reader"); 2450 return 0; 2451 } 2452 2453 struct rcu_torture_one_read_state_updown { 2454 struct hrtimer rtorsu_hrt; 2455 bool rtorsu_inuse; 2456 ktime_t rtorsu_kt; 2457 int rtorsu_cpu; 2458 unsigned long rtorsu_j; 2459 unsigned long rtorsu_ndowns; 2460 unsigned long rtorsu_nups; 2461 unsigned long rtorsu_nmigrates; 2462 struct torture_random_state rtorsu_trs; 2463 struct rcu_torture_one_read_state rtorsu_rtors; 2464 }; 2465 2466 static struct rcu_torture_one_read_state_updown *updownreaders; 2467 static DEFINE_TORTURE_RANDOM(rcu_torture_updown_rand); 2468 static int rcu_torture_updown(void *arg); 2469 2470 static enum hrtimer_restart rcu_torture_updown_hrt(struct hrtimer *hrtp) 2471 { 2472 int cpu = raw_smp_processor_id(); 2473 struct rcu_torture_one_read_state_updown *rtorsup; 2474 2475 rtorsup = container_of(hrtp, struct rcu_torture_one_read_state_updown, rtorsu_hrt); 2476 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2477 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2478 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2479 WRITE_ONCE(rtorsup->rtorsu_nmigrates, 2480 rtorsup->rtorsu_nmigrates + (cpu != rtorsup->rtorsu_cpu)); 2481 smp_store_release(&rtorsup->rtorsu_inuse, false); 2482 return HRTIMER_NORESTART; 2483 } 2484 2485 static int rcu_torture_updown_init(void) 2486 { 2487 int i; 2488 struct torture_random_state *rand = &rcu_torture_updown_rand; 2489 int ret; 2490 2491 if (n_up_down < 0) 2492 return 0; 2493 if (!srcu_torture_have_up_down()) { 2494 VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives"); 2495 return 0; 2496 } 2497 updownreaders = kcalloc(n_up_down, sizeof(*updownreaders), GFP_KERNEL); 2498 if (!updownreaders) { 2499 VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests"); 2500 return -ENOMEM; 2501 } 2502 for (i = 0; i < n_up_down; i++) { 2503 init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, rand); 2504 hrtimer_setup(&updownreaders[i].rtorsu_hrt, rcu_torture_updown_hrt, CLOCK_MONOTONIC, 2505 HRTIMER_MODE_REL | HRTIMER_MODE_HARD); 2506 torture_random_init(&updownreaders[i].rtorsu_trs); 2507 init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, 2508 &updownreaders[i].rtorsu_trs); 2509 } 2510 ret = torture_create_kthread(rcu_torture_updown, rand, updown_task); 2511 if (ret) { 2512 kfree(updownreaders); 2513 updownreaders = NULL; 2514 } 2515 return ret; 2516 } 2517 2518 static void rcu_torture_updown_cleanup(void) 2519 { 2520 struct rcu_torture_one_read_state_updown *rtorsup; 2521 2522 for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; 
rtorsup++) { 2523 if (!smp_load_acquire(&rtorsup->rtorsu_inuse)) 2524 continue; 2525 if (hrtimer_cancel(&rtorsup->rtorsu_hrt) || WARN_ON_ONCE(rtorsup->rtorsu_inuse)) { 2526 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2527 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2528 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2529 smp_store_release(&rtorsup->rtorsu_inuse, false); 2530 } 2531 2532 } 2533 kfree(updownreaders); 2534 updownreaders = NULL; 2535 } 2536 2537 // Do one reader for rcu_torture_updown(). 2538 static void rcu_torture_updown_one(struct rcu_torture_one_read_state_updown *rtorsup) 2539 { 2540 int idx; 2541 int rawidx; 2542 ktime_t t; 2543 2544 init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs); 2545 rawidx = cur_ops->down_read(); 2546 WRITE_ONCE(rtorsup->rtorsu_ndowns, rtorsup->rtorsu_ndowns + 1); 2547 idx = (rawidx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1; 2548 rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN; 2549 rtorsup->rtorsu_rtors.rtrsp++; 2550 rtorsup->rtorsu_cpu = raw_smp_processor_id(); 2551 if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1)) { 2552 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders); 2553 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1); 2554 schedule_timeout_idle(HZ); 2555 return; 2556 } 2557 smp_store_release(&rtorsup->rtorsu_inuse, true); 2558 t = torture_random(&rtorsup->rtorsu_trs) & 0xfffff; // One per million. 2559 if (t < 10 * 1000) 2560 t = 200 * 1000 * 1000; 2561 hrtimer_start(&rtorsup->rtorsu_hrt, t, HRTIMER_MODE_REL | HRTIMER_MODE_HARD); 2562 smp_mb(); // Sample jiffies after posting hrtimer. 2563 rtorsup->rtorsu_j = jiffies; // Not used by hrtimer handler. 2564 rtorsup->rtorsu_kt = t; 2565 } 2566 2567 /* 2568 * RCU torture up/down reader kthread, starting RCU readers in kthread 2569 * context and ending them in hrtimer handlers. Otherwise similar to 2570 * rcu_torture_reader(). 2571 */ 2572 static int 2573 rcu_torture_updown(void *arg) 2574 { 2575 unsigned long j; 2576 struct rcu_torture_one_read_state_updown *rtorsup; 2577 2578 VERBOSE_TOROUT_STRING("rcu_torture_updown task started"); 2579 do { 2580 for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) { 2581 if (torture_must_stop()) 2582 break; 2583 j = smp_load_acquire(&jiffies); // Time before ->rtorsu_inuse. 2584 if (smp_load_acquire(&rtorsup->rtorsu_inuse)) { 2585 WARN_ONCE(time_after(j, rtorsup->rtorsu_j + 1 + HZ * 10), 2586 "hrtimer queued at jiffies %lu for %lld ns took %lu jiffies\n", rtorsup->rtorsu_j, rtorsup->rtorsu_kt, j - rtorsup->rtorsu_j); 2587 continue; 2588 } 2589 rcu_torture_updown_one(rtorsup); 2590 } 2591 torture_hrtimeout_ms(1, 1000, &rcu_torture_updown_rand); 2592 stutter_wait("rcu_torture_updown"); 2593 } while (!torture_must_stop()); 2594 rcu_torture_updown_cleanup(); 2595 torture_kthread_stopping("rcu_torture_updown"); 2596 return 0; 2597 } 2598 2599 /* 2600 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2601 * increase race probabilities and fuzzes the interval between toggling. 
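 * Each pass picks a random CPU and either offloads or de-offloads its
 * RCU callbacks via rcu_nocb_cpu_offload() or rcu_nocb_cpu_deoffload(),
 * then sleeps for a fuzzed interval before toggling again.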
2602 */ 2603 static int rcu_nocb_toggle(void *arg) 2604 { 2605 int cpu; 2606 int maxcpu = -1; 2607 int oldnice = task_nice(current); 2608 long r; 2609 DEFINE_TORTURE_RANDOM(rand); 2610 ktime_t toggle_delay; 2611 unsigned long toggle_fuzz; 2612 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2613 2614 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2615 while (!rcu_inkernel_boot_has_ended()) 2616 schedule_timeout_interruptible(HZ / 10); 2617 for_each_possible_cpu(cpu) 2618 maxcpu = cpu; 2619 WARN_ON(maxcpu < 0); 2620 if (toggle_interval > ULONG_MAX) 2621 toggle_fuzz = ULONG_MAX >> 3; 2622 else 2623 toggle_fuzz = toggle_interval >> 3; 2624 if (toggle_fuzz <= 0) 2625 toggle_fuzz = NSEC_PER_USEC; 2626 do { 2627 r = torture_random(&rand); 2628 cpu = (r >> 1) % (maxcpu + 1); 2629 if (r & 0x1) { 2630 rcu_nocb_cpu_offload(cpu); 2631 atomic_long_inc(&n_nocb_offload); 2632 } else { 2633 rcu_nocb_cpu_deoffload(cpu); 2634 atomic_long_inc(&n_nocb_deoffload); 2635 } 2636 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2637 set_current_state(TASK_INTERRUPTIBLE); 2638 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2639 if (stutter_wait("rcu_nocb_toggle")) 2640 sched_set_normal(current, oldnice); 2641 } while (!torture_must_stop()); 2642 torture_kthread_stopping("rcu_nocb_toggle"); 2643 return 0; 2644 } 2645 2646 /* 2647 * Print torture statistics. Caller must ensure that there is only 2648 * one call to this function at a given time!!! This is normally 2649 * accomplished by relying on the module system to only have one copy 2650 * of the module loaded, and then by giving the rcu_torture_stats 2651 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2652 * thread is not running). 2653 */ 2654 static void 2655 rcu_torture_stats_print(void) 2656 { 2657 int cpu; 2658 int i; 2659 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2660 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2661 long n_gpwraps = 0; 2662 unsigned long ndowns = 0; 2663 unsigned long nunexpired = 0; 2664 unsigned long nmigrates = 0; 2665 unsigned long nups = 0; 2666 struct rcu_torture *rtcp; 2667 static unsigned long rtcv_snap = ULONG_MAX; 2668 static bool splatted; 2669 struct task_struct *wtp; 2670 2671 for_each_possible_cpu(cpu) { 2672 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2673 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2674 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2675 } 2676 if (cur_ops->get_gpwrap_count) 2677 n_gpwraps += cur_ops->get_gpwrap_count(cpu); 2678 } 2679 if (updownreaders) { 2680 for (i = 0; i < n_up_down; i++) { 2681 ndowns += READ_ONCE(updownreaders[i].rtorsu_ndowns); 2682 nups += READ_ONCE(updownreaders[i].rtorsu_nups); 2683 nunexpired += READ_ONCE(updownreaders[i].rtorsu_inuse); 2684 nmigrates += READ_ONCE(updownreaders[i].rtorsu_nmigrates); 2685 } 2686 } 2687 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2688 if (pipesummary[i] != 0) 2689 break; 2690 } // The value of variable "i" is used later, so don't clobber it! 2691 2692 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2693 rtcp = rcu_access_pointer(rcu_torture_current); 2694 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2695 rtcp, 2696 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2697 rcu_torture_current_version, 2698 list_empty(&rcu_torture_freelist), 2699 atomic_read(&n_rcu_torture_alloc), 2700 atomic_read(&n_rcu_torture_alloc_fail), 2701 atomic_read(&n_rcu_torture_free)); 2702 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ", 2703 atomic_read(&n_rcu_torture_mberror), 2704 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2705 n_rcu_torture_barrier_error, 2706 n_rcu_torture_boost_ktrerror); 2707 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2708 n_rcu_torture_boost_failure, 2709 n_rcu_torture_boosts, 2710 atomic_long_read(&n_rcu_torture_timers)); 2711 if (updownreaders) 2712 pr_cont("ndowns: %lu nups: %lu nhrt: %lu nmigrates: %lu ", ndowns, nups, nunexpired, nmigrates); 2713 torture_onoff_stats(); 2714 pr_cont("barrier: %ld/%ld:%ld ", 2715 data_race(n_barrier_successes), 2716 data_race(n_barrier_attempts), 2717 data_race(n_rcu_torture_barrier_error)); 2718 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2719 pr_cont("nocb-toggles: %ld:%ld ", 2720 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2721 pr_cont("gpwraps: %ld\n", n_gpwraps); 2722 2723 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2724 if (atomic_read(&n_rcu_torture_mberror) || 2725 atomic_read(&n_rcu_torture_mbchk_fail) || 2726 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2727 n_rcu_torture_boost_failure || i > 1) { 2728 pr_cont("%s", "!!! "); 2729 atomic_inc(&n_rcu_torture_error); 2730 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2731 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2732 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2733 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2734 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2735 WARN_ON_ONCE(i > 1); // Too-short grace period 2736 } 2737 pr_cont("Reader Pipe: "); 2738 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2739 pr_cont(" %ld", pipesummary[i]); 2740 pr_cont("\n"); 2741 2742 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2743 pr_cont("Reader Batch: "); 2744 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2745 pr_cont(" %ld", batchsummary[i]); 2746 pr_cont("\n"); 2747 2748 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2749 pr_cont("Free-Block Circulation: "); 2750 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2751 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2752 } 2753 pr_cont("\n"); 2754 2755 if (cur_ops->stats) 2756 cur_ops->stats(); 2757 if (rtcv_snap == rcu_torture_current_version && 2758 rcu_access_pointer(rcu_torture_current) && 2759 !rcu_stall_is_suppressed()) { 2760 int __maybe_unused flags = 0; 2761 unsigned long __maybe_unused gp_seq = 0; 2762 2763 if (cur_ops->get_gp_data) 2764 cur_ops->get_gp_data(&flags, &gp_seq); 2765 wtp = READ_ONCE(writer_task); 2766 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2767 rcu_torture_writer_state_getname(), 2768 rcu_torture_writer_state, gp_seq, flags, 2769 wtp == NULL ? ~0U : wtp->__state, 2770 wtp == NULL ? -1 : (int)task_cpu(wtp)); 2771 if (!splatted && wtp) { 2772 sched_show_task(wtp); 2773 splatted = true; 2774 } 2775 if (cur_ops->gp_kthread_dbg) 2776 cur_ops->gp_kthread_dbg(); 2777 rcu_ftrace_dump(DUMP_ALL); 2778 } 2779 rtcv_snap = rcu_torture_current_version; 2780 } 2781 2782 /* 2783 * Periodically prints torture statistics, if periodic statistics printing 2784 * was specified via the stat_interval module parameter. 
2785 */ 2786 static int 2787 rcu_torture_stats(void *arg) 2788 { 2789 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2790 do { 2791 schedule_timeout_interruptible(stat_interval * HZ); 2792 rcu_torture_stats_print(); 2793 torture_shutdown_absorb("rcu_torture_stats"); 2794 } while (!torture_must_stop()); 2795 torture_kthread_stopping("rcu_torture_stats"); 2796 return 0; 2797 } 2798 2799 /* Test mem_dump_obj() and friends. */ 2800 static void rcu_torture_mem_dump_obj(void) 2801 { 2802 struct rcu_head *rhp; 2803 struct kmem_cache *kcp; 2804 static int z; 2805 2806 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2807 if (WARN_ON_ONCE(!kcp)) 2808 return; 2809 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2810 if (WARN_ON_ONCE(!rhp)) { 2811 kmem_cache_destroy(kcp); 2812 return; 2813 } 2814 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2815 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2816 mem_dump_obj(ZERO_SIZE_PTR); 2817 pr_alert("mem_dump_obj(NULL):"); 2818 mem_dump_obj(NULL); 2819 pr_alert("mem_dump_obj(%px):", &rhp); 2820 mem_dump_obj(&rhp); 2821 pr_alert("mem_dump_obj(%px):", rhp); 2822 mem_dump_obj(rhp); 2823 pr_alert("mem_dump_obj(%px):", &rhp->func); 2824 mem_dump_obj(&rhp->func); 2825 pr_alert("mem_dump_obj(%px):", &z); 2826 mem_dump_obj(&z); 2827 kmem_cache_free(kcp, rhp); 2828 kmem_cache_destroy(kcp); 2829 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2830 if (WARN_ON_ONCE(!rhp)) 2831 return; 2832 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2833 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2834 mem_dump_obj(rhp); 2835 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2836 mem_dump_obj(&rhp->func); 2837 kfree(rhp); 2838 rhp = vmalloc(4096); 2839 if (WARN_ON_ONCE(!rhp)) 2840 return; 2841 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2842 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2843 mem_dump_obj(rhp); 2844 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2845 mem_dump_obj(&rhp->func); 2846 vfree(rhp); 2847 } 2848 2849 static void 2850 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2851 { 2852 pr_alert("%s" TORTURE_FLAG 2853 "--- %s: nreaders=%d nfakewriters=%d " 2854 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2855 "shuffle_interval=%d stutter=%d irqreader=%d " 2856 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2857 "test_boost=%d/%d test_boost_interval=%d " 2858 "test_boost_duration=%d test_boost_holdoff=%d shutdown_secs=%d " 2859 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2860 "stall_cpu_block=%d stall_cpu_repeat=%d " 2861 "n_barrier_cbs=%d " 2862 "onoff_interval=%d onoff_holdoff=%d " 2863 "read_exit_delay=%d read_exit_burst=%d " 2864 "reader_flavor=%x " 2865 "nocbs_nthreads=%d nocbs_toggle=%d " 2866 "test_nmis=%d " 2867 "preempt_duration=%d preempt_interval=%d n_up_down=%d\n", 2868 torture_type, tag, nrealreaders, nrealfakewriters, 2869 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2870 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2871 test_boost, cur_ops->can_boost, 2872 test_boost_interval, test_boost_duration, test_boost_holdoff, shutdown_secs, 2873 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2874 stall_cpu_block, stall_cpu_repeat, 2875 n_barrier_cbs, 2876 onoff_interval, onoff_holdoff, 2877 read_exit_delay, read_exit_burst, 2878 reader_flavor, 2879 
nocbs_nthreads, nocbs_toggle, 2880 test_nmis, 2881 preempt_duration, preempt_interval, n_up_down); 2882 } 2883 2884 static int rcutorture_booster_cleanup(unsigned int cpu) 2885 { 2886 struct task_struct *t; 2887 2888 if (boost_tasks[cpu] == NULL) 2889 return 0; 2890 mutex_lock(&boost_mutex); 2891 t = boost_tasks[cpu]; 2892 boost_tasks[cpu] = NULL; 2893 rcu_torture_enable_rt_throttle(); 2894 mutex_unlock(&boost_mutex); 2895 2896 /* This must be outside of the mutex, otherwise deadlock! */ 2897 torture_stop_kthread(rcu_torture_boost, t); 2898 return 0; 2899 } 2900 2901 static int rcutorture_booster_init(unsigned int cpu) 2902 { 2903 int retval; 2904 2905 if (boost_tasks[cpu] != NULL) 2906 return 0; /* Already created, nothing more to do. */ 2907 2908 // Testing RCU priority boosting requires rcutorture do 2909 // some serious abuse. Counter this by running ksoftirqd 2910 // at higher priority. 2911 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2912 struct sched_param sp; 2913 struct task_struct *t; 2914 2915 t = per_cpu(ksoftirqd, cpu); 2916 WARN_ON_ONCE(!t); 2917 sp.sched_priority = 2; 2918 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2919 #ifdef CONFIG_IRQ_FORCED_THREADING 2920 if (force_irqthreads()) { 2921 t = per_cpu(ktimerd, cpu); 2922 WARN_ON_ONCE(!t); 2923 sp.sched_priority = 2; 2924 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2925 } 2926 #endif 2927 } 2928 2929 /* Don't allow time recalculation while creating a new task. */ 2930 mutex_lock(&boost_mutex); 2931 rcu_torture_disable_rt_throttle(); 2932 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2933 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2934 cpu, "rcu_torture_boost_%u"); 2935 if (IS_ERR(boost_tasks[cpu])) { 2936 retval = PTR_ERR(boost_tasks[cpu]); 2937 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2938 n_rcu_torture_boost_ktrerror++; 2939 boost_tasks[cpu] = NULL; 2940 mutex_unlock(&boost_mutex); 2941 return retval; 2942 } 2943 mutex_unlock(&boost_mutex); 2944 return 0; 2945 } 2946 2947 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr) 2948 { 2949 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr); 2950 return NOTIFY_OK; 2951 } 2952 2953 static struct notifier_block rcu_torture_stall_block = { 2954 .notifier_call = rcu_torture_stall_nf, 2955 }; 2956 2957 /* 2958 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2959 * induces a CPU stall for the time specified by stall_cpu. If a new 2960 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted. 2961 */ 2962 static void rcu_torture_stall_one(int rep, int irqsoff) 2963 { 2964 int idx; 2965 unsigned long stop_at; 2966 2967 if (stall_cpu_holdoff > 0) { 2968 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2969 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2970 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2971 } 2972 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2973 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2974 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2975 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2976 if (kthread_should_stop()) 2977 break; 2978 schedule_timeout_uninterruptible(HZ); 2979 } 2980 } 2981 if (!kthread_should_stop() && stall_cpu > 0) { 2982 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2983 stop_at = ktime_get_seconds() + stall_cpu; 2984 /* RCU CPU stall is expected behavior in following code. 
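   A hedged summary of what follows: spin inside a read-side critical
   section until stop_at, with interrupts disabled if irqsoff, otherwise
   with preemption disabled unless stall_cpu_block, optionally touching
   the softlockup watchdog (stall_no_softlockup) so that softlockup
   splats do not drown out the expected RCU CPU stall warnings.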
*/ 2985 idx = cur_ops->readlock(); 2986 if (irqsoff) 2987 local_irq_disable(); 2988 else if (!stall_cpu_block) 2989 preempt_disable(); 2990 pr_alert("%s start stall episode %d on CPU %d.\n", 2991 __func__, rep + 1, raw_smp_processor_id()); 2992 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) && 2993 !kthread_should_stop()) 2994 if (stall_cpu_block) { 2995 #ifdef CONFIG_PREEMPTION 2996 preempt_schedule(); 2997 #else 2998 schedule_timeout_uninterruptible(HZ); 2999 #endif 3000 } else if (stall_no_softlockup) { 3001 touch_softlockup_watchdog(); 3002 } 3003 if (irqsoff) 3004 local_irq_enable(); 3005 else if (!stall_cpu_block) 3006 preempt_enable(); 3007 cur_ops->readunlock(idx); 3008 } 3009 } 3010 3011 /* 3012 * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many 3013 * additional times as specified by the stall_cpu_repeat module parameter. 3014 * Note that stall_cpu_irqsoff is ignored on the second and subsequent 3015 * stall. 3016 */ 3017 static int rcu_torture_stall(void *args) 3018 { 3019 int i; 3020 int repeat = stall_cpu_repeat; 3021 int ret; 3022 3023 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 3024 if (repeat < 0) { 3025 repeat = 0; 3026 WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)); 3027 } 3028 if (rcu_cpu_stall_notifiers) { 3029 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block); 3030 if (ret) 3031 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n", 3032 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : ""); 3033 } 3034 for (i = 0; i <= repeat; i++) { 3035 if (kthread_should_stop()) 3036 break; 3037 rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0); 3038 } 3039 pr_alert("%s end.\n", __func__); 3040 if (rcu_cpu_stall_notifiers && !ret) { 3041 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block); 3042 if (ret) 3043 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret); 3044 } 3045 torture_shutdown_absorb("rcu_torture_stall"); 3046 while (!kthread_should_stop()) 3047 schedule_timeout_interruptible(10 * HZ); 3048 return 0; 3049 } 3050 3051 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 3052 static int __init rcu_torture_stall_init(void) 3053 { 3054 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 3055 return 0; 3056 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 3057 } 3058 3059 /* State structure for forward-progress self-propagating RCU callback. */ 3060 struct fwd_cb_state { 3061 struct rcu_head rh; 3062 int stop; 3063 }; 3064 3065 /* 3066 * Forward-progress self-propagating RCU callback function. Because 3067 * callbacks run from softirq, this function is an implicit RCU read-side 3068 * critical section. 3069 */ 3070 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 3071 { 3072 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 3073 3074 if (READ_ONCE(fcsp->stop)) { 3075 WRITE_ONCE(fcsp->stop, 2); 3076 return; 3077 } 3078 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 3079 } 3080 3081 /* State for continuous-flood RCU callbacks. */ 3082 struct rcu_fwd_cb { 3083 struct rcu_head rh; 3084 struct rcu_fwd_cb *rfc_next; 3085 struct rcu_fwd *rfc_rfp; 3086 int rfc_gps; 3087 }; 3088 3089 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 3090 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 3091 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 3092 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 3093 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 3094 3095 struct rcu_launder_hist { 3096 long n_launders; 3097 unsigned long launder_gp_seq; 3098 }; 3099 3100 struct rcu_fwd { 3101 spinlock_t rcu_fwd_lock; 3102 struct rcu_fwd_cb *rcu_fwd_cb_head; 3103 struct rcu_fwd_cb **rcu_fwd_cb_tail; 3104 long n_launders_cb; 3105 unsigned long rcu_fwd_startat; 3106 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 3107 unsigned long rcu_launder_gp_seq_start; 3108 int rcu_fwd_id; 3109 }; 3110 3111 static DEFINE_MUTEX(rcu_fwd_mutex); 3112 static struct rcu_fwd *rcu_fwds; 3113 static unsigned long rcu_fwd_seq; 3114 static atomic_long_t rcu_fwd_max_cbs; 3115 static bool rcu_fwd_emergency_stop; 3116 3117 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 3118 { 3119 unsigned long gps; 3120 unsigned long gps_old; 3121 int i; 3122 int j; 3123 3124 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 3125 if (rfp->n_launders_hist[i].n_launders > 0) 3126 break; 3127 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 3128 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 3129 gps_old = rfp->rcu_launder_gp_seq_start; 3130 for (j = 0; j <= i; j++) { 3131 gps = rfp->n_launders_hist[j].launder_gp_seq; 3132 pr_cont(" %ds/%d: %ld:%ld", 3133 j + 1, FWD_CBS_HIST_DIV, 3134 rfp->n_launders_hist[j].n_launders, 3135 rcutorture_seq_diff(gps, gps_old)); 3136 gps_old = gps; 3137 } 3138 pr_cont("\n"); 3139 } 3140 3141 /* Callback function for continuous-flood RCU callbacks. */ 3142 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 3143 { 3144 unsigned long flags; 3145 int i; 3146 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 3147 struct rcu_fwd_cb **rfcpp; 3148 struct rcu_fwd *rfp = rfcp->rfc_rfp; 3149 3150 rfcp->rfc_next = NULL; 3151 rfcp->rfc_gps++; 3152 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 3153 rfcpp = rfp->rcu_fwd_cb_tail; 3154 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 3155 smp_store_release(rfcpp, rfcp); 3156 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 3157 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 3158 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 3159 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 3160 rfp->n_launders_hist[i].n_launders++; 3161 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 3162 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3163 } 3164 3165 // Give the scheduler a chance, even on nohz_full CPUs. 3166 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 3167 { 3168 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 3169 // Real call_rcu() floods hit userspace, so emulate that. 3170 if (need_resched() || (iter & 0xfff)) 3171 schedule(); 3172 return; 3173 } 3174 // No userspace emulation: CB invocation throttles call_rcu() 3175 cond_resched(); 3176 } 3177 3178 /* 3179 * Free all callbacks on the rcu_fwd_cb_head list, either because the 3180 * test is over or because we hit an OOM event. 
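 * The loop below also gives the scheduler a chance between kfree()
 * calls and, on nohz_full CPUs, briefly enters an extended quiescent
 * state via rcu_momentary_eqs() so that the cleanup itself does not
 * stall grace periods.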
3181 */ 3182 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 3183 { 3184 unsigned long flags; 3185 unsigned long freed = 0; 3186 struct rcu_fwd_cb *rfcp; 3187 3188 for (;;) { 3189 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 3190 rfcp = rfp->rcu_fwd_cb_head; 3191 if (!rfcp) { 3192 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3193 break; 3194 } 3195 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 3196 if (!rfp->rcu_fwd_cb_head) 3197 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 3198 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 3199 kfree(rfcp); 3200 freed++; 3201 rcu_torture_fwd_prog_cond_resched(freed); 3202 if (tick_nohz_full_enabled()) { 3203 local_irq_save(flags); 3204 rcu_momentary_eqs(); 3205 local_irq_restore(flags); 3206 } 3207 } 3208 return freed; 3209 } 3210 3211 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 3212 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 3213 int *tested, int *tested_tries) 3214 { 3215 unsigned long cver; 3216 unsigned long dur; 3217 struct fwd_cb_state fcs; 3218 unsigned long gps; 3219 int idx; 3220 int sd; 3221 int sd4; 3222 bool selfpropcb = false; 3223 unsigned long stopat; 3224 static DEFINE_TORTURE_RANDOM(trs); 3225 3226 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3227 if (!cur_ops->sync) 3228 return; // Cannot do need_resched() forward progress testing without ->sync. 3229 if (cur_ops->call && cur_ops->cb_barrier) { 3230 init_rcu_head_on_stack(&fcs.rh); 3231 selfpropcb = true; 3232 } 3233 3234 /* Tight loop containing cond_resched(). */ 3235 atomic_inc(&rcu_fwd_cb_nodelay); 3236 cur_ops->sync(); /* Later readers see above write. */ 3237 if (selfpropcb) { 3238 WRITE_ONCE(fcs.stop, 0); 3239 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 3240 } 3241 cver = READ_ONCE(rcu_torture_current_version); 3242 gps = cur_ops->get_gp_seq(); 3243 sd = cur_ops->stall_dur() + 1; 3244 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 3245 dur = sd4 + torture_random(&trs) % (sd - sd4); 3246 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 3247 stopat = rfp->rcu_fwd_startat + dur; 3248 while (time_before(jiffies, stopat) && 3249 !shutdown_time_arrived() && 3250 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3251 idx = cur_ops->readlock(); 3252 udelay(10); 3253 cur_ops->readunlock(idx); 3254 if (!fwd_progress_need_resched || need_resched()) 3255 cond_resched(); 3256 } 3257 (*tested_tries)++; 3258 if (!time_before(jiffies, stopat) && 3259 !shutdown_time_arrived() && 3260 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3261 (*tested)++; 3262 cver = READ_ONCE(rcu_torture_current_version) - cver; 3263 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 3264 WARN_ON(!cver && gps < 2); 3265 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 3266 rfp->rcu_fwd_id, dur, cver, gps); 3267 } 3268 if (selfpropcb) { 3269 WRITE_ONCE(fcs.stop, 1); 3270 cur_ops->sync(); /* Wait for running CB to complete. */ 3271 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 3272 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 3273 } 3274 3275 if (selfpropcb) { 3276 WARN_ON(READ_ONCE(fcs.stop) != 2); 3277 destroy_rcu_head_on_stack(&fcs.rh); 3278 } 3279 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 3280 atomic_dec(&rcu_fwd_cb_nodelay); 3281 } 3282 3283 /* Carry out call_rcu() forward-progress testing. 
*/ 3284 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 3285 { 3286 unsigned long cver; 3287 unsigned long flags; 3288 unsigned long gps; 3289 int i; 3290 long n_launders; 3291 long n_launders_cb_snap; 3292 long n_launders_sa; 3293 long n_max_cbs; 3294 long n_max_gps; 3295 struct rcu_fwd_cb *rfcp; 3296 struct rcu_fwd_cb *rfcpn; 3297 unsigned long stopat; 3298 unsigned long stoppedat; 3299 3300 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3301 if (READ_ONCE(rcu_fwd_emergency_stop)) 3302 return; /* Get out of the way quickly, no GP wait! */ 3303 if (!cur_ops->call) 3304 return; /* Can't do call_rcu() fwd prog without ->call. */ 3305 3306 /* Loop continuously posting RCU callbacks. */ 3307 atomic_inc(&rcu_fwd_cb_nodelay); 3308 cur_ops->sync(); /* Later readers see above write. */ 3309 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 3310 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 3311 n_launders = 0; 3312 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 3313 n_launders_sa = 0; 3314 n_max_cbs = 0; 3315 n_max_gps = 0; 3316 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 3317 rfp->n_launders_hist[i].n_launders = 0; 3318 cver = READ_ONCE(rcu_torture_current_version); 3319 gps = cur_ops->get_gp_seq(); 3320 rfp->rcu_launder_gp_seq_start = gps; 3321 tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. 3322 while (time_before(jiffies, stopat) && 3323 !shutdown_time_arrived() && 3324 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 3325 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 3326 rfcpn = NULL; 3327 if (rfcp) 3328 rfcpn = READ_ONCE(rfcp->rfc_next); 3329 if (rfcpn) { 3330 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 3331 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 3332 break; 3333 rfp->rcu_fwd_cb_head = rfcpn; 3334 n_launders++; 3335 n_launders_sa++; 3336 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 3337 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 3338 if (WARN_ON_ONCE(!rfcp)) { 3339 schedule_timeout_interruptible(1); 3340 continue; 3341 } 3342 n_max_cbs++; 3343 n_launders_sa = 0; 3344 rfcp->rfc_gps = 0; 3345 rfcp->rfc_rfp = rfp; 3346 } else { 3347 rfcp = NULL; 3348 } 3349 if (rfcp) 3350 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 3351 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 3352 if (tick_nohz_full_enabled()) { 3353 local_irq_save(flags); 3354 rcu_momentary_eqs(); 3355 local_irq_restore(flags); 3356 } 3357 } 3358 stoppedat = jiffies; 3359 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 3360 cver = READ_ONCE(rcu_torture_current_version) - cver; 3361 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 3362 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 3363 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. 
*/ 3364 (void)rcu_torture_fwd_prog_cbfree(rfp); 3365 3366 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 3367 !shutdown_time_arrived()) { 3368 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg) 3369 cur_ops->gp_kthread_dbg(); 3370 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n", 3371 __func__, 3372 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 3373 n_launders + n_max_cbs - n_launders_cb_snap, 3374 n_launders, n_launders_sa, 3375 n_max_gps, n_max_cbs, cver, gps, num_online_cpus()); 3376 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 3377 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 3378 rcu_torture_fwd_cb_hist(rfp); 3379 mutex_unlock(&rcu_fwd_mutex); 3380 } 3381 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 3382 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 3383 atomic_dec(&rcu_fwd_cb_nodelay); 3384 } 3385 3386 3387 /* 3388 * OOM notifier, but this only prints diagnostic information for the 3389 * current forward-progress test. 3390 */ 3391 static int rcutorture_oom_notify(struct notifier_block *self, 3392 unsigned long notused, void *nfreed) 3393 { 3394 int i; 3395 long ncbs; 3396 struct rcu_fwd *rfp; 3397 3398 mutex_lock(&rcu_fwd_mutex); 3399 rfp = rcu_fwds; 3400 if (!rfp) { 3401 mutex_unlock(&rcu_fwd_mutex); 3402 return NOTIFY_OK; 3403 } 3404 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 3405 __func__); 3406 for (i = 0; i < fwd_progress; i++) { 3407 rcu_torture_fwd_cb_hist(&rfp[i]); 3408 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 3409 } 3410 WRITE_ONCE(rcu_fwd_emergency_stop, true); 3411 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 3412 ncbs = 0; 3413 for (i = 0; i < fwd_progress; i++) 3414 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3415 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3416 cur_ops->cb_barrier(); 3417 ncbs = 0; 3418 for (i = 0; i < fwd_progress; i++) 3419 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3420 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3421 cur_ops->cb_barrier(); 3422 ncbs = 0; 3423 for (i = 0; i < fwd_progress; i++) 3424 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 3425 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 3426 smp_mb(); /* Frees before return to avoid redoing OOM. */ 3427 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 3428 pr_info("%s returning after OOM processing.\n", __func__); 3429 mutex_unlock(&rcu_fwd_mutex); 3430 return NOTIFY_OK; 3431 } 3432 3433 static struct notifier_block rcutorture_oom_nb = { 3434 .notifier_call = rcutorture_oom_notify 3435 }; 3436 3437 /* Carry out grace-period forward-progress testing. 
*/ 3438 static int rcu_torture_fwd_prog(void *args) 3439 { 3440 bool firsttime = true; 3441 long max_cbs; 3442 int oldnice = task_nice(current); 3443 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 3444 struct rcu_fwd *rfp = args; 3445 int tested = 0; 3446 int tested_tries = 0; 3447 3448 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 3449 rcu_bind_current_to_nocb(); 3450 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 3451 set_user_nice(current, MAX_NICE); 3452 do { 3453 if (!rfp->rcu_fwd_id) { 3454 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 3455 WRITE_ONCE(rcu_fwd_emergency_stop, false); 3456 if (!firsttime) { 3457 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 3458 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 3459 } 3460 firsttime = false; 3461 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 3462 } else { 3463 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 3464 schedule_timeout_interruptible(HZ / 20); 3465 oldseq = READ_ONCE(rcu_fwd_seq); 3466 } 3467 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 3468 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 3469 rcu_torture_fwd_prog_cr(rfp); 3470 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 3471 (!IS_ENABLED(CONFIG_TINY_RCU) || 3472 (rcu_inkernel_boot_has_ended() && 3473 torture_num_online_cpus() > rfp->rcu_fwd_id))) 3474 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 3475 3476 /* Avoid slow periods, better to test when busy. */ 3477 if (stutter_wait("rcu_torture_fwd_prog")) 3478 sched_set_normal(current, oldnice); 3479 } while (!torture_must_stop()); 3480 /* Short runs might not contain a valid forward-progress attempt. */ 3481 if (!rfp->rcu_fwd_id) { 3482 WARN_ON(!tested && tested_tries >= 5); 3483 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 3484 } 3485 torture_kthread_stopping("rcu_torture_fwd_prog"); 3486 return 0; 3487 } 3488 3489 /* If forward-progress checking is requested and feasible, spawn the thread. */ 3490 static int __init rcu_torture_fwd_prog_init(void) 3491 { 3492 int i; 3493 int ret = 0; 3494 struct rcu_fwd *rfp; 3495 3496 if (!fwd_progress) 3497 return 0; /* Not requested, so don't do it. */ 3498 if (fwd_progress >= nr_cpu_ids) { 3499 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 3500 fwd_progress = nr_cpu_ids; 3501 } else if (fwd_progress < 0) { 3502 fwd_progress = nr_cpu_ids; 3503 } 3504 if ((!cur_ops->sync && !cur_ops->call) || 3505 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 3506 cur_ops == &rcu_busted_ops) { 3507 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 3508 fwd_progress = 0; 3509 return 0; 3510 } 3511 if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 3512 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing"); 3513 fwd_progress = 0; 3514 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 3515 return -EINVAL; /* In module, can fail back to user. */ 3516 WARN_ON(1); /* Make sure rcutorture scripting notices conflict. 
*/ 3517 return 0; 3518 } 3519 if (fwd_progress_holdoff <= 0) 3520 fwd_progress_holdoff = 1; 3521 if (fwd_progress_div <= 0) 3522 fwd_progress_div = 4; 3523 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 3524 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 3525 if (!rfp || !fwd_prog_tasks) { 3526 kfree(rfp); 3527 kfree(fwd_prog_tasks); 3528 fwd_prog_tasks = NULL; 3529 fwd_progress = 0; 3530 return -ENOMEM; 3531 } 3532 for (i = 0; i < fwd_progress; i++) { 3533 spin_lock_init(&rfp[i].rcu_fwd_lock); 3534 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 3535 rfp[i].rcu_fwd_id = i; 3536 } 3537 mutex_lock(&rcu_fwd_mutex); 3538 rcu_fwds = rfp; 3539 mutex_unlock(&rcu_fwd_mutex); 3540 register_oom_notifier(&rcutorture_oom_nb); 3541 for (i = 0; i < fwd_progress; i++) { 3542 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 3543 if (ret) { 3544 fwd_progress = i; 3545 return ret; 3546 } 3547 } 3548 return 0; 3549 } 3550 3551 static void rcu_torture_fwd_prog_cleanup(void) 3552 { 3553 int i; 3554 struct rcu_fwd *rfp; 3555 3556 if (!rcu_fwds || !fwd_prog_tasks) 3557 return; 3558 for (i = 0; i < fwd_progress; i++) 3559 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 3560 unregister_oom_notifier(&rcutorture_oom_nb); 3561 mutex_lock(&rcu_fwd_mutex); 3562 rfp = rcu_fwds; 3563 rcu_fwds = NULL; 3564 mutex_unlock(&rcu_fwd_mutex); 3565 kfree(rfp); 3566 kfree(fwd_prog_tasks); 3567 fwd_prog_tasks = NULL; 3568 } 3569 3570 /* Callback function for RCU barrier testing. */ 3571 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3572 { 3573 atomic_inc(&barrier_cbs_invoked); 3574 } 3575 3576 /* IPI handler to get callback posted on desired CPU, if online. */ 3577 static int rcu_torture_barrier1cb(void *rcu_void) 3578 { 3579 struct rcu_head *rhp = rcu_void; 3580 3581 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3582 return 0; 3583 } 3584 3585 /* kthread function to register callbacks used to test RCU barriers. */ 3586 static int rcu_torture_barrier_cbs(void *arg) 3587 { 3588 long myid = (long)arg; 3589 bool lastphase = false; 3590 bool newphase; 3591 struct rcu_head rcu; 3592 3593 init_rcu_head_on_stack(&rcu); 3594 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3595 set_user_nice(current, MAX_NICE); 3596 do { 3597 wait_event(barrier_cbs_wq[myid], 3598 (newphase = 3599 smp_load_acquire(&barrier_phase)) != lastphase || 3600 torture_must_stop()); 3601 lastphase = newphase; 3602 if (torture_must_stop()) 3603 break; 3604 /* 3605 * The above smp_load_acquire() ensures barrier_phase load 3606 * is ordered before the following ->call(). 3607 */ 3608 if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1)) 3609 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3610 3611 if (atomic_dec_and_test(&barrier_cbs_count)) 3612 wake_up(&barrier_wq); 3613 } while (!torture_must_stop()); 3614 if (cur_ops->cb_barrier != NULL) 3615 cur_ops->cb_barrier(); 3616 destroy_rcu_head_on_stack(&rcu); 3617 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3618 return 0; 3619 } 3620 3621 /* kthread function to drive and coordinate RCU barrier testing. */ 3622 static int rcu_torture_barrier(void *arg) 3623 { 3624 int i; 3625 3626 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3627 do { 3628 atomic_set(&barrier_cbs_invoked, 0); 3629 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3630 /* Ensure barrier_phase ordered after prior assignments. 
*/ 3631 smp_store_release(&barrier_phase, !barrier_phase); 3632 for (i = 0; i < n_barrier_cbs; i++) 3633 wake_up(&barrier_cbs_wq[i]); 3634 wait_event(barrier_wq, 3635 atomic_read(&barrier_cbs_count) == 0 || 3636 torture_must_stop()); 3637 if (torture_must_stop()) 3638 break; 3639 n_barrier_attempts++; 3640 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3641 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3642 n_rcu_torture_barrier_error++; 3643 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3644 atomic_read(&barrier_cbs_invoked), 3645 n_barrier_cbs); 3646 WARN_ON(1); 3647 // Wait manually for the remaining callbacks 3648 i = 0; 3649 do { 3650 if (WARN_ON(i++ > HZ)) 3651 i = INT_MIN; 3652 schedule_timeout_interruptible(1); 3653 cur_ops->cb_barrier(); 3654 } while (atomic_read(&barrier_cbs_invoked) != 3655 n_barrier_cbs && 3656 !torture_must_stop()); 3657 smp_mb(); // Can't trust ordering if broken. 3658 if (!torture_must_stop()) 3659 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3660 atomic_read(&barrier_cbs_invoked)); 3661 } else { 3662 n_barrier_successes++; 3663 } 3664 schedule_timeout_interruptible(HZ / 10); 3665 } while (!torture_must_stop()); 3666 torture_kthread_stopping("rcu_torture_barrier"); 3667 return 0; 3668 } 3669 3670 /* Initialize RCU barrier testing. */ 3671 static int rcu_torture_barrier_init(void) 3672 { 3673 int i; 3674 int ret; 3675 3676 if (n_barrier_cbs <= 0) 3677 return 0; 3678 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3679 pr_alert("%s" TORTURE_FLAG 3680 " Call or barrier ops missing for %s,\n", 3681 torture_type, cur_ops->name); 3682 pr_alert("%s" TORTURE_FLAG 3683 " RCU barrier testing omitted from run.\n", 3684 torture_type); 3685 return 0; 3686 } 3687 atomic_set(&barrier_cbs_count, 0); 3688 atomic_set(&barrier_cbs_invoked, 0); 3689 barrier_cbs_tasks = 3690 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3691 GFP_KERNEL); 3692 barrier_cbs_wq = 3693 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3694 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3695 return -ENOMEM; 3696 for (i = 0; i < n_barrier_cbs; i++) { 3697 init_waitqueue_head(&barrier_cbs_wq[i]); 3698 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3699 (void *)(long)i, 3700 barrier_cbs_tasks[i]); 3701 if (ret) 3702 return ret; 3703 } 3704 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3705 } 3706 3707 /* Clean up after RCU barrier testing. */ 3708 static void rcu_torture_barrier_cleanup(void) 3709 { 3710 int i; 3711 3712 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3713 if (barrier_cbs_tasks != NULL) { 3714 for (i = 0; i < n_barrier_cbs; i++) 3715 torture_stop_kthread(rcu_torture_barrier_cbs, 3716 barrier_cbs_tasks[i]); 3717 kfree(barrier_cbs_tasks); 3718 barrier_cbs_tasks = NULL; 3719 } 3720 if (barrier_cbs_wq != NULL) { 3721 kfree(barrier_cbs_wq); 3722 barrier_cbs_wq = NULL; 3723 } 3724 } 3725 3726 static bool rcu_torture_can_boost(void) 3727 { 3728 static int boost_warn_once; 3729 int prio; 3730 3731 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3732 return false; 3733 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3734 return false; 3735 3736 prio = rcu_get_gp_kthreads_prio(); 3737 if (!prio) 3738 return false; 3739 3740 if (prio < 2) { 3741 if (boost_warn_once == 1) 3742 return false; 3743 3744 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3745 boost_warn_once = 1; 3746 return false; 3747 } 3748 3749 return true; 3750 } 3751 3752 static bool read_exit_child_stop; 3753 static bool read_exit_child_stopped; 3754 static wait_queue_head_t read_exit_wq; 3755 3756 // Child kthread which just does an rcutorture reader and exits. 3757 static int rcu_torture_read_exit_child(void *trsp_in) 3758 { 3759 struct torture_random_state *trsp = trsp_in; 3760 3761 set_user_nice(current, MAX_NICE); 3762 // Minimize time between reading and exiting. 3763 while (!kthread_should_stop()) 3764 schedule_timeout_uninterruptible(HZ / 20); 3765 (void)rcu_torture_one_read(trsp, -1); 3766 return 0; 3767 } 3768 3769 // Parent kthread which creates and destroys read-exit child kthreads. 3770 static int rcu_torture_read_exit(void *unused) 3771 { 3772 bool errexit = false; 3773 int i; 3774 struct task_struct *tsp; 3775 DEFINE_TORTURE_RANDOM(trs); 3776 3777 // Allocate and initialize. 3778 set_user_nice(current, MAX_NICE); 3779 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3780 3781 // Each pass through this loop does one read-exit episode. 3782 do { 3783 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3784 for (i = 0; i < read_exit_burst; i++) { 3785 if (READ_ONCE(read_exit_child_stop)) 3786 break; 3787 stutter_wait("rcu_torture_read_exit"); 3788 // Spawn child. 3789 tsp = kthread_run(rcu_torture_read_exit_child, 3790 &trs, "%s", "rcu_torture_read_exit_child"); 3791 if (IS_ERR(tsp)) { 3792 TOROUT_ERRSTRING("out of memory"); 3793 errexit = true; 3794 break; 3795 } 3796 cond_resched(); 3797 kthread_stop(tsp); 3798 n_read_exits++; 3799 } 3800 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3801 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3802 i = 0; 3803 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3804 schedule_timeout_uninterruptible(HZ); 3805 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3806 3807 // Clean up and exit. 3808 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3809 smp_mb(); // Store before wakeup. 3810 wake_up(&read_exit_wq); 3811 while (!torture_must_stop()) 3812 schedule_timeout_uninterruptible(HZ / 20); 3813 torture_kthread_stopping("rcu_torture_read_exit"); 3814 return 0; 3815 } 3816 3817 static int rcu_torture_read_exit_init(void) 3818 { 3819 if (read_exit_burst <= 0) 3820 return 0; 3821 init_waitqueue_head(&read_exit_wq); 3822 read_exit_child_stop = false; 3823 read_exit_child_stopped = false; 3824 return torture_create_kthread(rcu_torture_read_exit, NULL, 3825 read_exit_task); 3826 } 3827 3828 static void rcu_torture_read_exit_cleanup(void) 3829 { 3830 if (!read_exit_task) 3831 return; 3832 WRITE_ONCE(read_exit_child_stop, true); 3833 smp_mb(); // Above write before wait. 
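	/*
	 * The read-exit kthread checks read_exit_child_stop, finishes reaping
	 * its current child, and only then sets read_exit_child_stopped via
	 * smp_store_release() and wakes this waitqueue, so the wait_event()
	 * below returns only once no child kthread remains.
	 */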
3834 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 3835 torture_stop_kthread(rcu_torture_read_exit, read_exit_task); 3836 } 3837 3838 static void rcutorture_test_nmis(int n) 3839 { 3840 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3841 int cpu; 3842 int dumpcpu; 3843 int i; 3844 3845 for (i = 0; i < n; i++) { 3846 preempt_disable(); 3847 cpu = smp_processor_id(); 3848 dumpcpu = cpu + 1; 3849 if (dumpcpu >= nr_cpu_ids) 3850 dumpcpu = 0; 3851 pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu); 3852 dump_cpu_task(dumpcpu); 3853 preempt_enable(); 3854 schedule_timeout_uninterruptible(15 * HZ); 3855 } 3856 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3857 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis); 3858 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) 3859 } 3860 3861 // Randomly preempt online CPUs. 3862 static int rcu_torture_preempt(void *unused) 3863 { 3864 int cpu = -1; 3865 DEFINE_TORTURE_RANDOM(rand); 3866 3867 schedule_timeout_idle(stall_cpu_holdoff); 3868 do { 3869 // Wait for preempt_interval ms with up to 100us fuzz. 3870 torture_hrtimeout_ms(preempt_interval, 100, &rand); 3871 // Select online CPU. 3872 cpu = cpumask_next(cpu, cpu_online_mask); 3873 if (cpu >= nr_cpu_ids) 3874 cpu = cpumask_next(-1, cpu_online_mask); 3875 WARN_ON_ONCE(cpu >= nr_cpu_ids); 3876 // Move to that CPU, if can't do so, retry later. 3877 if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false)) 3878 continue; 3879 // Preempt at high-ish priority, then reset to normal. 3880 sched_set_fifo(current); 3881 torture_sched_setaffinity(current->pid, cpu_present_mask, true); 3882 mdelay(preempt_duration); 3883 sched_set_normal(current, 0); 3884 stutter_wait("rcu_torture_preempt"); 3885 } while (!torture_must_stop()); 3886 torture_kthread_stopping("rcu_torture_preempt"); 3887 return 0; 3888 } 3889 3890 static enum cpuhp_state rcutor_hp; 3891 3892 static struct hrtimer gpwrap_lag_timer; 3893 static bool gpwrap_lag_active; 3894 3895 /* Timer handler for toggling RCU grace-period sequence overflow test lag value */ 3896 static enum hrtimer_restart rcu_gpwrap_lag_timer(struct hrtimer *timer) 3897 { 3898 ktime_t next_delay; 3899 3900 if (gpwrap_lag_active) { 3901 pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n"); 3902 cur_ops->set_gpwrap_lag(0); 3903 gpwrap_lag_active = false; 3904 next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0); 3905 } else { 3906 pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n", gpwrap_lag_gps); 3907 cur_ops->set_gpwrap_lag(gpwrap_lag_gps); 3908 gpwrap_lag_active = true; 3909 next_delay = ktime_set(gpwrap_lag_active_mins * 60, 0); 3910 } 3911 3912 if (torture_must_stop_irq()) 3913 return HRTIMER_NORESTART; 3914 3915 hrtimer_forward_now(timer, next_delay); 3916 return HRTIMER_RESTART; 3917 } 3918 3919 static int rcu_gpwrap_lag_init(void) 3920 { 3921 if (!gpwrap_lag) 3922 return 0; 3923 3924 if (gpwrap_lag_cycle_mins <= 0 || gpwrap_lag_active_mins <= 0) { 3925 pr_alert("rcu-torture: lag timing parameters must be positive\n"); 3926 return -EINVAL; 3927 } 3928 3929 hrtimer_setup(&gpwrap_lag_timer, rcu_gpwrap_lag_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3930 gpwrap_lag_active = false; 3931 hrtimer_start(&gpwrap_lag_timer, 3932 ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL); 3933 3934 return 0; 3935 } 3936 3937 static void rcu_gpwrap_lag_cleanup(void) 3938 { 3939
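	/*
	 * Stop the lag-toggle timer and set the gpwrap lag back to zero so
	 * that the RCU flavor under test is left with lag testing disabled.
	 */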
hrtimer_cancel(&gpwrap_lag_timer); 3940 cur_ops->set_gpwrap_lag(0); 3941 gpwrap_lag_active = false; 3942 } 3943 static void 3944 rcu_torture_cleanup(void) 3945 { 3946 int firsttime; 3947 int flags = 0; 3948 unsigned long gp_seq = 0; 3949 int i; 3950 int j; 3951 3952 if (torture_cleanup_begin()) { 3953 if (cur_ops->cb_barrier != NULL) { 3954 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3955 cur_ops->cb_barrier(); 3956 } 3957 if (cur_ops->gp_slow_unregister) 3958 cur_ops->gp_slow_unregister(NULL); 3959 return; 3960 } 3961 if (!cur_ops) { 3962 torture_cleanup_end(); 3963 return; 3964 } 3965 3966 rcutorture_test_nmis(test_nmis); 3967 3968 if (cur_ops->gp_kthread_dbg) 3969 cur_ops->gp_kthread_dbg(); 3970 torture_stop_kthread(rcu_torture_preempt, preempt_task); 3971 rcu_torture_read_exit_cleanup(); 3972 rcu_torture_barrier_cleanup(); 3973 rcu_torture_fwd_prog_cleanup(); 3974 torture_stop_kthread(rcu_torture_stall, stall_task); 3975 torture_stop_kthread(rcu_torture_writer, writer_task); 3976 3977 if (nocb_tasks) { 3978 for (i = 0; i < nrealnocbers; i++) 3979 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 3980 kfree(nocb_tasks); 3981 nocb_tasks = NULL; 3982 } 3983 3984 if (updown_task) { 3985 torture_stop_kthread(rcu_torture_updown, updown_task); 3986 updown_task = NULL; 3987 } 3988 if (reader_tasks) { 3989 for (i = 0; i < nrealreaders; i++) 3990 torture_stop_kthread(rcu_torture_reader, 3991 reader_tasks[i]); 3992 kfree(reader_tasks); 3993 reader_tasks = NULL; 3994 } 3995 kfree(rcu_torture_reader_mbchk); 3996 rcu_torture_reader_mbchk = NULL; 3997 3998 if (fakewriter_tasks) { 3999 for (i = 0; i < nrealfakewriters; i++) 4000 torture_stop_kthread(rcu_torture_fakewriter, 4001 fakewriter_tasks[i]); 4002 kfree(fakewriter_tasks); 4003 fakewriter_tasks = NULL; 4004 } 4005 4006 if (cur_ops->get_gp_data) 4007 cur_ops->get_gp_data(&flags, &gp_seq); 4008 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 4009 cur_ops->name, (long)gp_seq, flags, 4010 rcutorture_seq_diff(gp_seq, start_gp_seq)); 4011 torture_stop_kthread(rcu_torture_stats, stats_task); 4012 torture_stop_kthread(rcu_torture_fqs, fqs_task); 4013 if (rcu_torture_can_boost() && rcutor_hp >= 0) 4014 cpuhp_remove_state(rcutor_hp); 4015 4016 /* 4017 * Wait for all RCU callbacks to fire, then do torture-type-specific 4018 * cleanup operations. 4019 */ 4020 if (cur_ops->cb_barrier != NULL) { 4021 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 4022 cur_ops->cb_barrier(); 4023 } 4024 if (cur_ops->cleanup != NULL) 4025 cur_ops->cleanup(); 4026 4027 rcu_torture_mem_dump_obj(); 4028 4029 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 4030 4031 if (err_segs_recorded) { 4032 pr_alert("Failure/close-call rcutorture reader segments:\n"); 4033 if (rt_read_nsegs == 0) 4034 pr_alert("\t: No segments recorded!!!\n"); 4035 firsttime = 1; 4036 for (i = 0; i < rt_read_nsegs; i++) { 4037 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP)) 4038 pr_alert("\t%lluus ", div64_u64(err_segs[i].rt_ts, 1000ULL)); 4039 else 4040 pr_alert("\t"); 4041 pr_cont("%d: %#4x", i, err_segs[i].rt_readstate); 4042 if (err_segs[i].rt_delay_jiffies != 0) { 4043 pr_cont("%s%ldjiffies", firsttime ? 
"" : "+", 4044 err_segs[i].rt_delay_jiffies); 4045 firsttime = 0; 4046 } 4047 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) { 4048 pr_cont(" CPU %2d", err_segs[i].rt_cpu); 4049 if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu) 4050 pr_cont("->%-2d", err_segs[i].rt_end_cpu); 4051 else 4052 pr_cont(" ..."); 4053 } 4054 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && 4055 cur_ops->gather_gp_seqs && cur_ops->format_gp_seqs) { 4056 char buf1[20+1]; 4057 char buf2[20+1]; 4058 char sepchar = '-'; 4059 4060 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq, 4061 buf1, ARRAY_SIZE(buf1)); 4062 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq_end, 4063 buf2, ARRAY_SIZE(buf2)); 4064 if (err_segs[i].rt_gp_seq == err_segs[i].rt_gp_seq_end) { 4065 if (buf2[0]) { 4066 for (j = 0; buf2[j]; j++) 4067 buf2[j] = '.'; 4068 if (j) 4069 buf2[j - 1] = ' '; 4070 } 4071 sepchar = ' '; 4072 } 4073 pr_cont(" %s%c%s", buf1, sepchar, buf2); 4074 } 4075 if (err_segs[i].rt_delay_ms != 0) { 4076 pr_cont(" %s%ldms", firsttime ? "" : "+", 4077 err_segs[i].rt_delay_ms); 4078 firsttime = 0; 4079 } 4080 if (err_segs[i].rt_delay_us != 0) { 4081 pr_cont(" %s%ldus", firsttime ? "" : "+", 4082 err_segs[i].rt_delay_us); 4083 firsttime = 0; 4084 } 4085 pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : ""); 4086 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH) 4087 pr_cont(" BH"); 4088 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ) 4089 pr_cont(" IRQ"); 4090 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT) 4091 pr_cont(" PREEMPT"); 4092 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH) 4093 pr_cont(" RBH"); 4094 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED) 4095 pr_cont(" SCHED"); 4096 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1) 4097 pr_cont(" RCU_1"); 4098 if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2) 4099 pr_cont(" RCU_2"); 4100 pr_cont("\n"); 4101 4102 } 4103 if (rt_read_preempted) 4104 pr_alert("\tReader was preempted.\n"); 4105 } 4106 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 4107 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 4108 else if (torture_onoff_failures()) 4109 rcu_torture_print_module_parms(cur_ops, 4110 "End of test: RCU_HOTPLUG"); 4111 else 4112 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 4113 torture_cleanup_end(); 4114 if (cur_ops->gp_slow_unregister) 4115 cur_ops->gp_slow_unregister(NULL); 4116 4117 if (gpwrap_lag && cur_ops->set_gpwrap_lag) 4118 rcu_gpwrap_lag_cleanup(); 4119 } 4120 4121 static void rcu_torture_leak_cb(struct rcu_head *rhp) 4122 { 4123 } 4124 4125 static void rcu_torture_err_cb(struct rcu_head *rhp) 4126 { 4127 /* 4128 * This -might- happen due to race conditions, but is unlikely. 4129 * The scenario that leads to this happening is that the 4130 * first of the pair of duplicate callbacks is queued, 4131 * someone else starts a grace period that includes that 4132 * callback, then the second of the pair must wait for the 4133 * next grace period. Unlikely, but can happen. If it 4134 * does happen, the debug-objects subsystem won't have splatted. 4135 */ 4136 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 4137 } 4138 4139 /* 4140 * Verify that double-free causes debug-objects to complain, but only 4141 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 4142 * cannot be carried out. 
4143 */ 4144 static void rcu_test_debug_objects(void) 4145 { 4146 struct rcu_head rh1; 4147 struct rcu_head rh2; 4148 int idx; 4149 4150 if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) { 4151 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n", 4152 KBUILD_MODNAME, cur_ops->name); 4153 return; 4154 } 4155 4156 if (WARN_ON_ONCE(cur_ops->debug_objects && 4157 (!cur_ops->call || !cur_ops->cb_barrier))) 4158 return; 4159 4160 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 4161 4162 init_rcu_head_on_stack(&rh1); 4163 init_rcu_head_on_stack(&rh2); 4164 pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name); 4165 4166 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 4167 idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */ 4168 cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 4169 cur_ops->call(&rh2, rcu_torture_leak_cb); 4170 cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 4171 if (rhp) { 4172 cur_ops->call(rhp, rcu_torture_leak_cb); 4173 cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 4174 } 4175 cur_ops->readunlock(idx); 4176 4177 /* Wait for them all to get done so we can safely return. */ 4178 cur_ops->cb_barrier(); 4179 pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name); 4180 destroy_rcu_head_on_stack(&rh1); 4181 destroy_rcu_head_on_stack(&rh2); 4182 kfree(rhp); 4183 } 4184 4185 static void rcutorture_sync(void) 4186 { 4187 static unsigned long n; 4188 4189 if (cur_ops->sync && !(++n & 0xfff)) 4190 cur_ops->sync(); 4191 } 4192 4193 static DEFINE_MUTEX(mut0); 4194 static DEFINE_MUTEX(mut1); 4195 static DEFINE_MUTEX(mut2); 4196 static DEFINE_MUTEX(mut3); 4197 static DEFINE_MUTEX(mut4); 4198 static DEFINE_MUTEX(mut5); 4199 static DEFINE_MUTEX(mut6); 4200 static DEFINE_MUTEX(mut7); 4201 static DEFINE_MUTEX(mut8); 4202 static DEFINE_MUTEX(mut9); 4203 4204 static DECLARE_RWSEM(rwsem0); 4205 static DECLARE_RWSEM(rwsem1); 4206 static DECLARE_RWSEM(rwsem2); 4207 static DECLARE_RWSEM(rwsem3); 4208 static DECLARE_RWSEM(rwsem4); 4209 static DECLARE_RWSEM(rwsem5); 4210 static DECLARE_RWSEM(rwsem6); 4211 static DECLARE_RWSEM(rwsem7); 4212 static DECLARE_RWSEM(rwsem8); 4213 static DECLARE_RWSEM(rwsem9); 4214 4215 DEFINE_STATIC_SRCU(srcu0); 4216 DEFINE_STATIC_SRCU(srcu1); 4217 DEFINE_STATIC_SRCU(srcu2); 4218 DEFINE_STATIC_SRCU(srcu3); 4219 DEFINE_STATIC_SRCU(srcu4); 4220 DEFINE_STATIC_SRCU(srcu5); 4221 DEFINE_STATIC_SRCU(srcu6); 4222 DEFINE_STATIC_SRCU(srcu7); 4223 DEFINE_STATIC_SRCU(srcu8); 4224 DEFINE_STATIC_SRCU(srcu9); 4225 4226 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i, 4227 int cyclelen, int deadlock) 4228 { 4229 int j = i + 1; 4230 4231 if (j >= cyclelen) 4232 j = deadlock ? 0 : -1; 4233 if (j >= 0) 4234 pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i); 4235 else 4236 pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i); 4237 return j; 4238 } 4239 4240 // Test lockdep on SRCU-based deadlock scenarios. 
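// For example (illustrative value only), test_srcu_lockdep=1013 decodes per
// the DNNL scheme used below to deadlock=1, test type 01 (SRCU/mutex), and
// cycle length 3.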
4241 static void rcu_torture_init_srcu_lockdep(void) 4242 { 4243 int cyclelen; 4244 int deadlock; 4245 bool err = false; 4246 int i; 4247 int j; 4248 int idx; 4249 struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4, 4250 &mut5, &mut6, &mut7, &mut8, &mut9 }; 4251 struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4, 4252 &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 }; 4253 struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4, 4254 &srcu5, &srcu6, &srcu7, &srcu8, &srcu9 }; 4255 int testtype; 4256 4257 if (!test_srcu_lockdep) 4258 return; 4259 4260 deadlock = test_srcu_lockdep / 1000; 4261 testtype = (test_srcu_lockdep / 10) % 100; 4262 cyclelen = test_srcu_lockdep % 10; 4263 WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus)); 4264 if (WARN_ONCE(deadlock != !!deadlock, 4265 "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n", 4266 __func__, test_srcu_lockdep, deadlock)) 4267 err = true; 4268 if (WARN_ONCE(cyclelen <= 0, 4269 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", 4270 __func__, test_srcu_lockdep, cyclelen)) 4271 err = true; 4272 if (err) 4273 goto err_out; 4274 4275 if (testtype == 0) { 4276 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", 4277 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4278 if (deadlock && cyclelen == 1) 4279 pr_info("%s: Expect hang.\n", __func__); 4280 for (i = 0; i < cyclelen; i++) { 4281 j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu", 4282 "srcu_read_unlock", i, cyclelen, deadlock); 4283 idx = srcu_read_lock(srcus[i]); 4284 if (j >= 0) 4285 synchronize_srcu(srcus[j]); 4286 srcu_read_unlock(srcus[i], idx); 4287 } 4288 return; 4289 } 4290 4291 if (testtype == 1) { 4292 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", 4293 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4294 for (i = 0; i < cyclelen; i++) { 4295 pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n", 4296 __func__, i, i, i, i); 4297 idx = srcu_read_lock(srcus[i]); 4298 mutex_lock(muts[i]); 4299 mutex_unlock(muts[i]); 4300 srcu_read_unlock(srcus[i], idx); 4301 4302 j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu", 4303 "mutex_unlock", i, cyclelen, deadlock); 4304 mutex_lock(muts[i]); 4305 if (j >= 0) 4306 synchronize_srcu(srcus[j]); 4307 mutex_unlock(muts[i]); 4308 } 4309 return; 4310 } 4311 4312 if (testtype == 2) { 4313 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", 4314 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); 4315 for (i = 0; i < cyclelen; i++) { 4316 pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n", 4317 __func__, i, i, i, i); 4318 idx = srcu_read_lock(srcus[i]); 4319 down_read(rwsems[i]); 4320 up_read(rwsems[i]); 4321 srcu_read_unlock(srcus[i], idx); 4322 4323 j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu", 4324 "up_write", i, cyclelen, deadlock); 4325 down_write(rwsems[i]); 4326 if (j >= 0) 4327 synchronize_srcu(srcus[j]); 4328 up_write(rwsems[i]); 4329 } 4330 return; 4331 } 4332 4333 #ifdef CONFIG_TASKS_TRACE_RCU 4334 if (testtype == 3) { 4335 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", 4336 __func__, test_srcu_lockdep, cyclelen, deadlock ? 
"" : "non-"); 4337 if (deadlock && cyclelen == 1) 4338 pr_info("%s: Expect hang.\n", __func__); 4339 for (i = 0; i < cyclelen; i++) { 4340 char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock"; 4341 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" 4342 : "synchronize_srcu"; 4343 char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock"; 4344 4345 j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock); 4346 if (i == 0) 4347 rcu_read_lock_trace(); 4348 else 4349 idx = srcu_read_lock(srcus[i]); 4350 if (j >= 0) { 4351 if (i == cyclelen - 1) 4352 synchronize_rcu_tasks_trace(); 4353 else 4354 synchronize_srcu(srcus[j]); 4355 } 4356 if (i == 0) 4357 rcu_read_unlock_trace(); 4358 else 4359 srcu_read_unlock(srcus[i], idx); 4360 } 4361 return; 4362 } 4363 #endif // #ifdef CONFIG_TASKS_TRACE_RCU 4364 4365 err_out: 4366 pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep); 4367 pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__); 4368 pr_info("%s: D: Deadlock if nonzero.\n", __func__); 4369 pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__); 4370 pr_info("%s: L: Cycle length.\n", __func__); 4371 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU)) 4372 pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__); 4373 } 4374 4375 static int __init 4376 rcu_torture_init(void) 4377 { 4378 long i; 4379 int cpu; 4380 int firsterr = 0; 4381 int flags = 0; 4382 unsigned long gp_seq = 0; 4383 static struct rcu_torture_ops *torture_ops[] = { 4384 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 4385 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 4386 &trivial_ops, 4387 }; 4388 4389 if (!torture_init_begin(torture_type, verbose)) 4390 return -EBUSY; 4391 4392 /* Process args and tell the world that the torturer is on the job. 
*/ 4393 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 4394 cur_ops = torture_ops[i]; 4395 if (strcmp(torture_type, cur_ops->name) == 0) 4396 break; 4397 } 4398 if (i == ARRAY_SIZE(torture_ops)) { 4399 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 4400 torture_type); 4401 pr_alert("rcu-torture types:"); 4402 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 4403 pr_cont(" %s", torture_ops[i]->name); 4404 pr_cont("\n"); 4405 firsterr = -EINVAL; 4406 cur_ops = NULL; 4407 goto unwind; 4408 } 4409 if (cur_ops->fqs == NULL && fqs_duration != 0) { 4410 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 4411 fqs_duration = 0; 4412 } 4413 if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops || 4414 !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) { 4415 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", 4416 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); 4417 nocbs_nthreads = 0; 4418 } 4419 if (cur_ops->init) 4420 cur_ops->init(); 4421 4422 rcu_torture_init_srcu_lockdep(); 4423 4424 if (nfakewriters >= 0) { 4425 nrealfakewriters = nfakewriters; 4426 } else { 4427 nrealfakewriters = num_online_cpus() - 2 - nfakewriters; 4428 if (nrealfakewriters <= 0) 4429 nrealfakewriters = 1; 4430 } 4431 4432 if (nreaders >= 0) { 4433 nrealreaders = nreaders; 4434 } else { 4435 nrealreaders = num_online_cpus() - 2 - nreaders; 4436 if (nrealreaders <= 0) 4437 nrealreaders = 1; 4438 } 4439 rcu_torture_print_module_parms(cur_ops, "Start of test"); 4440 if (cur_ops->get_gp_data) 4441 cur_ops->get_gp_data(&flags, &gp_seq); 4442 start_gp_seq = gp_seq; 4443 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 4444 cur_ops->name, (long)gp_seq, flags); 4445 4446 /* Set up the freelist. */ 4447 4448 INIT_LIST_HEAD(&rcu_torture_freelist); 4449 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 4450 rcu_tortures[i].rtort_mbtest = 0; 4451 list_add_tail(&rcu_tortures[i].rtort_free, 4452 &rcu_torture_freelist); 4453 } 4454 4455 /* Initialize the statistics so that each run gets its own numbers. */ 4456 4457 rcu_torture_current = NULL; 4458 rcu_torture_current_version = 0; 4459 atomic_set(&n_rcu_torture_alloc, 0); 4460 atomic_set(&n_rcu_torture_alloc_fail, 0); 4461 atomic_set(&n_rcu_torture_free, 0); 4462 atomic_set(&n_rcu_torture_mberror, 0); 4463 atomic_set(&n_rcu_torture_mbchk_fail, 0); 4464 atomic_set(&n_rcu_torture_mbchk_tries, 0); 4465 atomic_set(&n_rcu_torture_error, 0); 4466 n_rcu_torture_barrier_error = 0; 4467 n_rcu_torture_boost_ktrerror = 0; 4468 n_rcu_torture_boost_failure = 0; 4469 n_rcu_torture_boosts = 0; 4470 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 4471 atomic_set(&rcu_torture_wcount[i], 0); 4472 for_each_possible_cpu(cpu) { 4473 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 4474 per_cpu(rcu_torture_count, cpu)[i] = 0; 4475 per_cpu(rcu_torture_batch, cpu)[i] = 0; 4476 } 4477 } 4478 err_segs_recorded = 0; 4479 rt_read_nsegs = 0; 4480 4481 /* Start up the kthreads. 
*/ 4482 4483 rcu_torture_write_types(); 4484 if (nrealfakewriters > 0) { 4485 fakewriter_tasks = kcalloc(nrealfakewriters, 4486 sizeof(fakewriter_tasks[0]), 4487 GFP_KERNEL); 4488 if (fakewriter_tasks == NULL) { 4489 TOROUT_ERRSTRING("out of memory"); 4490 firsterr = -ENOMEM; 4491 goto unwind; 4492 } 4493 } 4494 for (i = 0; i < nrealfakewriters; i++) { 4495 firsterr = torture_create_kthread(rcu_torture_fakewriter, 4496 NULL, fakewriter_tasks[i]); 4497 if (torture_init_error(firsterr)) 4498 goto unwind; 4499 } 4500 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 4501 GFP_KERNEL); 4502 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 4503 GFP_KERNEL); 4504 if (!reader_tasks || !rcu_torture_reader_mbchk) { 4505 TOROUT_ERRSTRING("out of memory"); 4506 firsterr = -ENOMEM; 4507 goto unwind; 4508 } 4509 for (i = 0; i < nrealreaders; i++) { 4510 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 4511 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 4512 reader_tasks[i]); 4513 if (torture_init_error(firsterr)) 4514 goto unwind; 4515 } 4516 4517 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 4518 writer_task); 4519 if (torture_init_error(firsterr)) 4520 goto unwind; 4521 4522 firsterr = rcu_torture_updown_init(); 4523 if (torture_init_error(firsterr)) 4524 goto unwind; 4525 nrealnocbers = nocbs_nthreads; 4526 if (WARN_ON(nrealnocbers < 0)) 4527 nrealnocbers = 1; 4528 if (WARN_ON(nocbs_toggle < 0)) 4529 nocbs_toggle = HZ; 4530 if (nrealnocbers > 0) { 4531 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 4532 if (nocb_tasks == NULL) { 4533 TOROUT_ERRSTRING("out of memory"); 4534 firsterr = -ENOMEM; 4535 goto unwind; 4536 } 4537 } else { 4538 nocb_tasks = NULL; 4539 } 4540 for (i = 0; i < nrealnocbers; i++) { 4541 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 4542 if (torture_init_error(firsterr)) 4543 goto unwind; 4544 } 4545 if (stat_interval > 0) { 4546 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 4547 stats_task); 4548 if (torture_init_error(firsterr)) 4549 goto unwind; 4550 } 4551 if (test_no_idle_hz && shuffle_interval > 0) { 4552 firsterr = torture_shuffle_init(shuffle_interval * HZ); 4553 if (torture_init_error(firsterr)) 4554 goto unwind; 4555 } 4556 if (stutter < 0) 4557 stutter = 0; 4558 if (stutter) { 4559 int t; 4560 4561 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 4562 firsterr = torture_stutter_init(stutter * HZ, t); 4563 if (torture_init_error(firsterr)) 4564 goto unwind; 4565 } 4566 if (fqs_duration < 0) 4567 fqs_duration = 0; 4568 if (fqs_holdoff < 0) 4569 fqs_holdoff = 0; 4570 if (fqs_duration && fqs_holdoff) { 4571 /* Create the fqs thread */ 4572 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 4573 fqs_task); 4574 if (torture_init_error(firsterr)) 4575 goto unwind; 4576 } 4577 if (test_boost_interval < 1) 4578 test_boost_interval = 1; 4579 if (test_boost_duration < 2) 4580 test_boost_duration = 2; 4581 if (rcu_torture_can_boost()) { 4582 4583 boost_starttime = jiffies + test_boost_interval * HZ; 4584 4585 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 4586 rcutorture_booster_init, 4587 rcutorture_booster_cleanup); 4588 rcutor_hp = firsterr; 4589 if (torture_init_error(firsterr)) 4590 goto unwind; 4591 } 4592 shutdown_jiffies = jiffies + shutdown_secs * HZ; 4593 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 4594 if (torture_init_error(firsterr)) 4595 goto unwind; 4596 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 4597 rcutorture_sync); 4598 if (torture_init_error(firsterr)) 4599 goto unwind; 4600 firsterr = rcu_torture_stall_init(); 4601 if (torture_init_error(firsterr)) 4602 goto unwind; 4603 firsterr = rcu_torture_fwd_prog_init(); 4604 if (torture_init_error(firsterr)) 4605 goto unwind; 4606 firsterr = rcu_torture_barrier_init(); 4607 if (torture_init_error(firsterr)) 4608 goto unwind; 4609 firsterr = rcu_torture_read_exit_init(); 4610 if (torture_init_error(firsterr)) 4611 goto unwind; 4612 if (preempt_duration > 0) { 4613 firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task); 4614 if (torture_init_error(firsterr)) 4615 goto unwind; 4616 } 4617 if (object_debug) 4618 rcu_test_debug_objects(); 4619 4620 if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) 4621 cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); 4622 4623 if (gpwrap_lag && cur_ops->set_gpwrap_lag) { 4624 firsterr = rcu_gpwrap_lag_init(); 4625 if (torture_init_error(firsterr)) 4626 goto unwind; 4627 } 4628 4629 torture_init_end(); 4630 return 0; 4631 4632 unwind: 4633 torture_init_end(); 4634 rcu_torture_cleanup(); 4635 if (shutdown_secs) { 4636 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 4637 kernel_power_off(); 4638 } 4639 return firsterr; 4640 } 4641 4642 module_init(rcu_torture_init); 4643 module_exit(rcu_torture_cleanup); 4644
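/*
 * Minimal usage sketch, assuming rcutorture is built as a module.  The
 * parameter names come from the torture_param() definitions in this file;
 * the values below are examples only, not recommendations:
 *
 *	modprobe rcutorture fwd_progress=2 n_barrier_cbs=4 read_exit_burst=16 \
 *		stall_cpu=0 test_srcu_lockdep=0
 */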