1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/interrupt.h> 25 #include <linux/sched/signal.h> 26 #include <uapi/linux/sched/types.h> 27 #include <linux/atomic.h> 28 #include <linux/bitops.h> 29 #include <linux/completion.h> 30 #include <linux/moduleparam.h> 31 #include <linux/percpu.h> 32 #include <linux/notifier.h> 33 #include <linux/reboot.h> 34 #include <linux/freezer.h> 35 #include <linux/cpu.h> 36 #include <linux/delay.h> 37 #include <linux/stat.h> 38 #include <linux/srcu.h> 39 #include <linux/slab.h> 40 #include <linux/trace_clock.h> 41 #include <asm/byteorder.h> 42 #include <linux/torture.h> 43 #include <linux/vmalloc.h> 44 #include <linux/sched/debug.h> 45 #include <linux/sched/sysctl.h> 46 #include <linux/oom.h> 47 #include <linux/tick.h> 48 #include <linux/rcupdate_trace.h> 49 #include <linux/nmi.h> 50 51 #include "rcu.h" 52 53 MODULE_LICENSE("GPL"); 54 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 55 56 /* Bits for ->extendables field, extendables param, and related definitions. */ 57 #define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */ 58 #define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1) 59 #define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */ 60 #define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2) 61 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 62 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 63 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 64 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 65 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 66 #define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */ 67 #define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */ 68 #define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */ 69 #define RCUTORTURE_MAX_EXTEND \ 70 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 71 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 72 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 73 /* Must be power of two minus one. 
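					   Being one less than a power of two lets it
					   double as a bit mask, presumably so a
					   torture_random() sample can be ANDed with it
					   to bound the number of reader-extension
					   segments.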
 */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s"); 118 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 119 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 120 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds."); 121 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds."); 122 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs"); 123 torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); 124 125 static char *torture_type = "rcu"; 126 module_param(torture_type, charp, 0444); 127 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 128 129 static int nrealnocbers; 130 static int nrealreaders; 131 static struct task_struct *writer_task; 132 static struct task_struct **fakewriter_tasks; 133 static struct task_struct **reader_tasks; 134 static struct task_struct **nocb_tasks; 135 static struct task_struct *stats_task; 136 static struct task_struct *fqs_task; 137 static struct task_struct *boost_tasks[NR_CPUS]; 138 static struct task_struct *stall_task; 139 static struct task_struct **fwd_prog_tasks; 140 static struct task_struct **barrier_cbs_tasks; 141 static struct task_struct *barrier_task; 142 static struct task_struct *read_exit_task; 143 144 #define RCU_TORTURE_PIPE_LEN 10 145 146 // Mailbox-like structure to check RCU global memory ordering. 147 struct rcu_torture_reader_check { 148 unsigned long rtc_myloops; 149 int rtc_chkrdr; 150 unsigned long rtc_chkloops; 151 int rtc_ready; 152 struct rcu_torture_reader_check *rtc_assigner; 153 } ____cacheline_internodealigned_in_smp; 154 155 // Update-side data structure used to check RCU readers. 156 struct rcu_torture { 157 struct rcu_head rtort_rcu; 158 int rtort_pipe_count; 159 struct list_head rtort_free; 160 int rtort_mbtest; 161 struct rcu_torture_reader_check *rtort_chkp; 162 }; 163 164 static LIST_HEAD(rcu_torture_freelist); 165 static struct rcu_torture __rcu *rcu_torture_current; 166 static unsigned long rcu_torture_current_version; 167 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 168 static DEFINE_SPINLOCK(rcu_torture_lock); 169 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 170 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 171 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 172 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 173 static atomic_t n_rcu_torture_alloc; 174 static atomic_t n_rcu_torture_alloc_fail; 175 static atomic_t n_rcu_torture_free; 176 static atomic_t n_rcu_torture_mberror; 177 static atomic_t n_rcu_torture_mbchk_fail; 178 static atomic_t n_rcu_torture_mbchk_tries; 179 static atomic_t n_rcu_torture_error; 180 static long n_rcu_torture_barrier_error; 181 static long n_rcu_torture_boost_ktrerror; 182 static long n_rcu_torture_boost_rterror; 183 static long n_rcu_torture_boost_failure; 184 static long n_rcu_torture_boosts; 185 static atomic_long_t n_rcu_torture_timers; 186 static long n_barrier_attempts; 187 static long n_barrier_successes; /* did rcu_barrier test succeed? 
*/ 188 static unsigned long n_read_exits; 189 static struct list_head rcu_torture_removed; 190 static unsigned long shutdown_jiffies; 191 static unsigned long start_gp_seq; 192 static atomic_long_t n_nocb_offload; 193 static atomic_long_t n_nocb_deoffload; 194 195 static int rcu_torture_writer_state; 196 #define RTWS_FIXED_DELAY 0 197 #define RTWS_DELAY 1 198 #define RTWS_REPLACE 2 199 #define RTWS_DEF_FREE 3 200 #define RTWS_EXP_SYNC 4 201 #define RTWS_COND_GET 5 202 #define RTWS_COND_GET_FULL 6 203 #define RTWS_COND_GET_EXP 7 204 #define RTWS_COND_GET_EXP_FULL 8 205 #define RTWS_COND_SYNC 9 206 #define RTWS_COND_SYNC_FULL 10 207 #define RTWS_COND_SYNC_EXP 11 208 #define RTWS_COND_SYNC_EXP_FULL 12 209 #define RTWS_POLL_GET 13 210 #define RTWS_POLL_GET_FULL 14 211 #define RTWS_POLL_GET_EXP 15 212 #define RTWS_POLL_GET_EXP_FULL 16 213 #define RTWS_POLL_WAIT 17 214 #define RTWS_POLL_WAIT_FULL 18 215 #define RTWS_POLL_WAIT_EXP 19 216 #define RTWS_POLL_WAIT_EXP_FULL 20 217 #define RTWS_SYNC 21 218 #define RTWS_STUTTER 22 219 #define RTWS_STOPPING 23 220 static const char * const rcu_torture_writer_state_names[] = { 221 "RTWS_FIXED_DELAY", 222 "RTWS_DELAY", 223 "RTWS_REPLACE", 224 "RTWS_DEF_FREE", 225 "RTWS_EXP_SYNC", 226 "RTWS_COND_GET", 227 "RTWS_COND_GET_FULL", 228 "RTWS_COND_GET_EXP", 229 "RTWS_COND_GET_EXP_FULL", 230 "RTWS_COND_SYNC", 231 "RTWS_COND_SYNC_FULL", 232 "RTWS_COND_SYNC_EXP", 233 "RTWS_COND_SYNC_EXP_FULL", 234 "RTWS_POLL_GET", 235 "RTWS_POLL_GET_FULL", 236 "RTWS_POLL_GET_EXP", 237 "RTWS_POLL_GET_EXP_FULL", 238 "RTWS_POLL_WAIT", 239 "RTWS_POLL_WAIT_FULL", 240 "RTWS_POLL_WAIT_EXP", 241 "RTWS_POLL_WAIT_EXP_FULL", 242 "RTWS_SYNC", 243 "RTWS_STUTTER", 244 "RTWS_STOPPING", 245 }; 246 247 /* Record reader segment types and duration for first failing read. */ 248 struct rt_read_seg { 249 int rt_readstate; 250 unsigned long rt_delay_jiffies; 251 unsigned long rt_delay_ms; 252 unsigned long rt_delay_us; 253 bool rt_preempted; 254 }; 255 static int err_segs_recorded; 256 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 257 static int rt_read_nsegs; 258 259 static const char *rcu_torture_writer_state_getname(void) 260 { 261 unsigned int i = READ_ONCE(rcu_torture_writer_state); 262 263 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 264 return "???"; 265 return rcu_torture_writer_state_names[i]; 266 } 267 268 #ifdef CONFIG_RCU_TRACE 269 static u64 notrace rcu_trace_clock_local(void) 270 { 271 u64 ts = trace_clock_local(); 272 273 (void)do_div(ts, NSEC_PER_USEC); 274 return ts; 275 } 276 #else /* #ifdef CONFIG_RCU_TRACE */ 277 static u64 notrace rcu_trace_clock_local(void) 278 { 279 return 0ULL; 280 } 281 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 282 283 /* 284 * Stop aggressive CPU-hog tests a bit before the end of the test in order 285 * to avoid interfering with test shutdown. 286 */ 287 static bool shutdown_time_arrived(void) 288 { 289 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 290 } 291 292 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 293 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 294 /* and boost task create/destroy. */ 295 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 296 static bool barrier_phase; /* Test phase. */ 297 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 298 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. 
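						   One wait queue per barrier-callback
						   kthread (see n_barrier_cbs); presumably
						   each kthread sleeps here until told to
						   post its callback for the next
						   rcu_barrier() test cycle.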
*/ 299 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 300 301 static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 302 303 /* 304 * Allocate an element from the rcu_tortures pool. 305 */ 306 static struct rcu_torture * 307 rcu_torture_alloc(void) 308 { 309 struct list_head *p; 310 311 spin_lock_bh(&rcu_torture_lock); 312 if (list_empty(&rcu_torture_freelist)) { 313 atomic_inc(&n_rcu_torture_alloc_fail); 314 spin_unlock_bh(&rcu_torture_lock); 315 return NULL; 316 } 317 atomic_inc(&n_rcu_torture_alloc); 318 p = rcu_torture_freelist.next; 319 list_del_init(p); 320 spin_unlock_bh(&rcu_torture_lock); 321 return container_of(p, struct rcu_torture, rtort_free); 322 } 323 324 /* 325 * Free an element to the rcu_tortures pool. 326 */ 327 static void 328 rcu_torture_free(struct rcu_torture *p) 329 { 330 atomic_inc(&n_rcu_torture_free); 331 spin_lock_bh(&rcu_torture_lock); 332 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 333 spin_unlock_bh(&rcu_torture_lock); 334 } 335 336 /* 337 * Operations vector for selecting different types of tests. 338 */ 339 340 struct rcu_torture_ops { 341 int ttype; 342 void (*init)(void); 343 void (*cleanup)(void); 344 int (*readlock)(void); 345 void (*read_delay)(struct torture_random_state *rrsp, 346 struct rt_read_seg *rtrsp); 347 void (*readunlock)(int idx); 348 int (*readlock_held)(void); 349 unsigned long (*get_gp_seq)(void); 350 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 351 void (*deferred_free)(struct rcu_torture *p); 352 void (*sync)(void); 353 void (*exp_sync)(void); 354 unsigned long (*get_gp_state_exp)(void); 355 unsigned long (*start_gp_poll_exp)(void); 356 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp); 357 bool (*poll_gp_state_exp)(unsigned long oldstate); 358 void (*cond_sync_exp)(unsigned long oldstate); 359 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp); 360 unsigned long (*get_comp_state)(void); 361 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp); 362 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2); 363 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2); 364 unsigned long (*get_gp_state)(void); 365 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp); 366 unsigned long (*get_gp_completed)(void); 367 void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp); 368 unsigned long (*start_gp_poll)(void); 369 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp); 370 bool (*poll_gp_state)(unsigned long oldstate); 371 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp); 372 bool (*poll_need_2gp)(bool poll, bool poll_full); 373 void (*cond_sync)(unsigned long oldstate); 374 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp); 375 call_rcu_func_t call; 376 void (*cb_barrier)(void); 377 void (*fqs)(void); 378 void (*stats)(void); 379 void (*gp_kthread_dbg)(void); 380 bool (*check_boost_failed)(unsigned long gp_state, int *cpup); 381 int (*stall_dur)(void); 382 long cbflood_max; 383 int irq_capable; 384 int can_boost; 385 int extendables; 386 int slow_gps; 387 int no_pi_lock; 388 const char *name; 389 }; 390 391 static struct rcu_torture_ops *cur_ops; 392 393 /* 394 * Definitions for rcu torture testing. 
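 *
 * These hooks wire the generic rcu_torture_ops interface to vanilla RCU:
 * rcu_read_lock()/rcu_read_unlock() readers, synchronize_rcu() and
 * synchronize_rcu_expedited() grace-period waits, call_rcu_hurry()
 * callbacks, and the polled/conditional grace-period APIs.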
395 */ 396 397 static int torture_readlock_not_held(void) 398 { 399 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); 400 } 401 402 static int rcu_torture_read_lock(void) __acquires(RCU) 403 { 404 rcu_read_lock(); 405 return 0; 406 } 407 408 static void 409 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 410 { 411 unsigned long started; 412 unsigned long completed; 413 const unsigned long shortdelay_us = 200; 414 unsigned long longdelay_ms = 300; 415 unsigned long long ts; 416 417 /* We want a short delay sometimes to make a reader delay the grace 418 * period, and we want a long delay occasionally to trigger 419 * force_quiescent_state. */ 420 421 if (!atomic_read(&rcu_fwd_cb_nodelay) && 422 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 423 started = cur_ops->get_gp_seq(); 424 ts = rcu_trace_clock_local(); 425 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 426 longdelay_ms = 5; /* Avoid triggering BH limits. */ 427 mdelay(longdelay_ms); 428 rtrsp->rt_delay_ms = longdelay_ms; 429 completed = cur_ops->get_gp_seq(); 430 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 431 started, completed); 432 } 433 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 434 udelay(shortdelay_us); 435 rtrsp->rt_delay_us = shortdelay_us; 436 } 437 if (!preempt_count() && 438 !(torture_random(rrsp) % (nrealreaders * 500))) { 439 torture_preempt_schedule(); /* QS only if preemptible. */ 440 rtrsp->rt_preempted = true; 441 } 442 } 443 444 static void rcu_torture_read_unlock(int idx) __releases(RCU) 445 { 446 rcu_read_unlock(); 447 } 448 449 /* 450 * Update callback in the pipe. This should be invoked after a grace period. 451 */ 452 static bool 453 rcu_torture_pipe_update_one(struct rcu_torture *rp) 454 { 455 int i; 456 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); 457 458 if (rtrcp) { 459 WRITE_ONCE(rp->rtort_chkp, NULL); 460 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). 461 } 462 i = READ_ONCE(rp->rtort_pipe_count); 463 if (i > RCU_TORTURE_PIPE_LEN) 464 i = RCU_TORTURE_PIPE_LEN; 465 atomic_inc(&rcu_torture_wcount[i]); 466 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 467 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 468 rp->rtort_mbtest = 0; 469 return true; 470 } 471 return false; 472 } 473 474 /* 475 * Update all callbacks in the pipe. Suitable for synchronous grace-period 476 * primitives. 477 */ 478 static void 479 rcu_torture_pipe_update(struct rcu_torture *old_rp) 480 { 481 struct rcu_torture *rp; 482 struct rcu_torture *rp1; 483 484 if (old_rp) 485 list_add(&old_rp->rtort_free, &rcu_torture_removed); 486 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 487 if (rcu_torture_pipe_update_one(rp)) { 488 list_del(&rp->rtort_free); 489 rcu_torture_free(rp); 490 } 491 } 492 } 493 494 static void 495 rcu_torture_cb(struct rcu_head *p) 496 { 497 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 498 499 if (torture_must_stop_irq()) { 500 /* Test is ending, just drop callbacks on the floor. */ 501 /* The next initialization will pick up the pieces. 
 */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.get_gp_completed = get_completed_synchronize_rcu,
	.get_gp_completed_full = get_completed_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay, /* just reuse rcu's version.
*/ 605 .readunlock = rcu_torture_read_unlock, 606 .readlock_held = torture_readlock_not_held, 607 .get_gp_seq = rcu_no_completed, 608 .deferred_free = rcu_busted_torture_deferred_free, 609 .sync = synchronize_rcu_busted, 610 .exp_sync = synchronize_rcu_busted, 611 .call = call_rcu_busted, 612 .cb_barrier = NULL, 613 .fqs = NULL, 614 .stats = NULL, 615 .irq_capable = 1, 616 .name = "busted" 617 }; 618 619 /* 620 * Definitions for srcu torture testing. 621 */ 622 623 DEFINE_STATIC_SRCU(srcu_ctl); 624 static struct srcu_struct srcu_ctld; 625 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 626 static struct rcu_torture_ops srcud_ops; 627 628 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) 629 { 630 if (cur_ops == &srcud_ops) 631 return srcu_read_lock_nmisafe(srcu_ctlp); 632 else 633 return srcu_read_lock(srcu_ctlp); 634 } 635 636 static void 637 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 638 { 639 long delay; 640 const long uspertick = 1000000 / HZ; 641 const long longdelay = 10; 642 643 /* We want there to be long-running readers, but not all the time. */ 644 645 delay = torture_random(rrsp) % 646 (nrealreaders * 2 * longdelay * uspertick); 647 if (!delay && in_task()) { 648 schedule_timeout_interruptible(longdelay); 649 rtrsp->rt_delay_jiffies = longdelay; 650 } else { 651 rcu_read_delay(rrsp, rtrsp); 652 } 653 } 654 655 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) 656 { 657 if (cur_ops == &srcud_ops) 658 srcu_read_unlock_nmisafe(srcu_ctlp, idx); 659 else 660 srcu_read_unlock(srcu_ctlp, idx); 661 } 662 663 static int torture_srcu_read_lock_held(void) 664 { 665 return srcu_read_lock_held(srcu_ctlp); 666 } 667 668 static unsigned long srcu_torture_completed(void) 669 { 670 return srcu_batches_completed(srcu_ctlp); 671 } 672 673 static void srcu_torture_deferred_free(struct rcu_torture *rp) 674 { 675 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 676 } 677 678 static void srcu_torture_synchronize(void) 679 { 680 synchronize_srcu(srcu_ctlp); 681 } 682 683 static unsigned long srcu_torture_get_gp_state(void) 684 { 685 return get_state_synchronize_srcu(srcu_ctlp); 686 } 687 688 static unsigned long srcu_torture_start_gp_poll(void) 689 { 690 return start_poll_synchronize_srcu(srcu_ctlp); 691 } 692 693 static bool srcu_torture_poll_gp_state(unsigned long oldstate) 694 { 695 return poll_state_synchronize_srcu(srcu_ctlp, oldstate); 696 } 697 698 static void srcu_torture_call(struct rcu_head *head, 699 rcu_callback_t func) 700 { 701 call_srcu(srcu_ctlp, head, func); 702 } 703 704 static void srcu_torture_barrier(void) 705 { 706 srcu_barrier(srcu_ctlp); 707 } 708 709 static void srcu_torture_stats(void) 710 { 711 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 712 } 713 714 static void srcu_torture_synchronize_expedited(void) 715 { 716 synchronize_srcu_expedited(srcu_ctlp); 717 } 718 719 static struct rcu_torture_ops srcu_ops = { 720 .ttype = SRCU_FLAVOR, 721 .init = rcu_sync_torture_init, 722 .readlock = srcu_torture_read_lock, 723 .read_delay = srcu_read_delay, 724 .readunlock = srcu_torture_read_unlock, 725 .readlock_held = torture_srcu_read_lock_held, 726 .get_gp_seq = srcu_torture_completed, 727 .deferred_free = srcu_torture_deferred_free, 728 .sync = srcu_torture_synchronize, 729 .exp_sync = srcu_torture_synchronize_expedited, 730 .get_gp_state = srcu_torture_get_gp_state, 731 .start_gp_poll = srcu_torture_start_gp_poll, 732 .poll_gp_state = srcu_torture_poll_gp_state, 733 .call = srcu_torture_call, 
734 .cb_barrier = srcu_torture_barrier, 735 .stats = srcu_torture_stats, 736 .cbflood_max = 50000, 737 .irq_capable = 1, 738 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 739 .name = "srcu" 740 }; 741 742 static void srcu_torture_init(void) 743 { 744 rcu_sync_torture_init(); 745 WARN_ON(init_srcu_struct(&srcu_ctld)); 746 srcu_ctlp = &srcu_ctld; 747 } 748 749 static void srcu_torture_cleanup(void) 750 { 751 cleanup_srcu_struct(&srcu_ctld); 752 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 753 } 754 755 /* As above, but dynamically allocated. */ 756 static struct rcu_torture_ops srcud_ops = { 757 .ttype = SRCU_FLAVOR, 758 .init = srcu_torture_init, 759 .cleanup = srcu_torture_cleanup, 760 .readlock = srcu_torture_read_lock, 761 .read_delay = srcu_read_delay, 762 .readunlock = srcu_torture_read_unlock, 763 .readlock_held = torture_srcu_read_lock_held, 764 .get_gp_seq = srcu_torture_completed, 765 .deferred_free = srcu_torture_deferred_free, 766 .sync = srcu_torture_synchronize, 767 .exp_sync = srcu_torture_synchronize_expedited, 768 .get_gp_state = srcu_torture_get_gp_state, 769 .start_gp_poll = srcu_torture_start_gp_poll, 770 .poll_gp_state = srcu_torture_poll_gp_state, 771 .call = srcu_torture_call, 772 .cb_barrier = srcu_torture_barrier, 773 .stats = srcu_torture_stats, 774 .cbflood_max = 50000, 775 .irq_capable = 1, 776 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 777 .name = "srcud" 778 }; 779 780 /* As above, but broken due to inappropriate reader extension. */ 781 static struct rcu_torture_ops busted_srcud_ops = { 782 .ttype = SRCU_FLAVOR, 783 .init = srcu_torture_init, 784 .cleanup = srcu_torture_cleanup, 785 .readlock = srcu_torture_read_lock, 786 .read_delay = rcu_read_delay, 787 .readunlock = srcu_torture_read_unlock, 788 .readlock_held = torture_srcu_read_lock_held, 789 .get_gp_seq = srcu_torture_completed, 790 .deferred_free = srcu_torture_deferred_free, 791 .sync = srcu_torture_synchronize, 792 .exp_sync = srcu_torture_synchronize_expedited, 793 .call = srcu_torture_call, 794 .cb_barrier = srcu_torture_barrier, 795 .stats = srcu_torture_stats, 796 .irq_capable = 1, 797 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 798 .extendables = RCUTORTURE_MAX_EXTEND, 799 .name = "busted_srcud" 800 }; 801 802 /* 803 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 804 * This implementation does not necessarily work well with CPU hotplug. 805 */ 806 807 static void synchronize_rcu_trivial(void) 808 { 809 int cpu; 810 811 for_each_online_cpu(cpu) { 812 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu)); 813 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 814 } 815 } 816 817 static int rcu_torture_read_lock_trivial(void) __acquires(RCU) 818 { 819 preempt_disable(); 820 return 0; 821 } 822 823 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) 824 { 825 preempt_enable(); 826 } 827 828 static struct rcu_torture_ops trivial_ops = { 829 .ttype = RCU_TRIVIAL_FLAVOR, 830 .init = rcu_sync_torture_init, 831 .readlock = rcu_torture_read_lock_trivial, 832 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 833 .readunlock = rcu_torture_read_unlock_trivial, 834 .readlock_held = torture_readlock_not_held, 835 .get_gp_seq = rcu_no_completed, 836 .sync = synchronize_rcu_trivial, 837 .exp_sync = synchronize_rcu_trivial, 838 .fqs = NULL, 839 .stats = NULL, 840 .irq_capable = 1, 841 .name = "trivial" 842 }; 843 844 #ifdef CONFIG_TASKS_RCU 845 846 /* 847 * Definitions for RCU-tasks torture testing. 
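 *
 * RCU-Tasks readers are ordinary code regions with no explicit markers
 * (voluntary context switch is the quiescent state), so the ->readlock()
 * and ->readunlock() hooks below are intentionally no-ops.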
848 */ 849 850 static int tasks_torture_read_lock(void) 851 { 852 return 0; 853 } 854 855 static void tasks_torture_read_unlock(int idx) 856 { 857 } 858 859 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 860 { 861 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 862 } 863 864 static void synchronize_rcu_mult_test(void) 865 { 866 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry); 867 } 868 869 static struct rcu_torture_ops tasks_ops = { 870 .ttype = RCU_TASKS_FLAVOR, 871 .init = rcu_sync_torture_init, 872 .readlock = tasks_torture_read_lock, 873 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 874 .readunlock = tasks_torture_read_unlock, 875 .get_gp_seq = rcu_no_completed, 876 .deferred_free = rcu_tasks_torture_deferred_free, 877 .sync = synchronize_rcu_tasks, 878 .exp_sync = synchronize_rcu_mult_test, 879 .call = call_rcu_tasks, 880 .cb_barrier = rcu_barrier_tasks, 881 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 882 .fqs = NULL, 883 .stats = NULL, 884 .irq_capable = 1, 885 .slow_gps = 1, 886 .name = "tasks" 887 }; 888 889 #define TASKS_OPS &tasks_ops, 890 891 #else // #ifdef CONFIG_TASKS_RCU 892 893 #define TASKS_OPS 894 895 #endif // #else #ifdef CONFIG_TASKS_RCU 896 897 898 #ifdef CONFIG_TASKS_RUDE_RCU 899 900 /* 901 * Definitions for rude RCU-tasks torture testing. 902 */ 903 904 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 905 { 906 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 907 } 908 909 static struct rcu_torture_ops tasks_rude_ops = { 910 .ttype = RCU_TASKS_RUDE_FLAVOR, 911 .init = rcu_sync_torture_init, 912 .readlock = rcu_torture_read_lock_trivial, 913 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 914 .readunlock = rcu_torture_read_unlock_trivial, 915 .get_gp_seq = rcu_no_completed, 916 .deferred_free = rcu_tasks_rude_torture_deferred_free, 917 .sync = synchronize_rcu_tasks_rude, 918 .exp_sync = synchronize_rcu_tasks_rude, 919 .call = call_rcu_tasks_rude, 920 .cb_barrier = rcu_barrier_tasks_rude, 921 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 922 .cbflood_max = 50000, 923 .fqs = NULL, 924 .stats = NULL, 925 .irq_capable = 1, 926 .name = "tasks-rude" 927 }; 928 929 #define TASKS_RUDE_OPS &tasks_rude_ops, 930 931 #else // #ifdef CONFIG_TASKS_RUDE_RCU 932 933 #define TASKS_RUDE_OPS 934 935 #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU 936 937 938 #ifdef CONFIG_TASKS_TRACE_RCU 939 940 /* 941 * Definitions for tracing RCU-tasks torture testing. 942 */ 943 944 static int tasks_tracing_torture_read_lock(void) 945 { 946 rcu_read_lock_trace(); 947 return 0; 948 } 949 950 static void tasks_tracing_torture_read_unlock(int idx) 951 { 952 rcu_read_unlock_trace(); 953 } 954 955 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 956 { 957 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 958 } 959 960 static struct rcu_torture_ops tasks_tracing_ops = { 961 .ttype = RCU_TASKS_TRACING_FLAVOR, 962 .init = rcu_sync_torture_init, 963 .readlock = tasks_tracing_torture_read_lock, 964 .read_delay = srcu_read_delay, /* just reuse srcu's version. 
*/ 965 .readunlock = tasks_tracing_torture_read_unlock, 966 .readlock_held = rcu_read_lock_trace_held, 967 .get_gp_seq = rcu_no_completed, 968 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 969 .sync = synchronize_rcu_tasks_trace, 970 .exp_sync = synchronize_rcu_tasks_trace, 971 .call = call_rcu_tasks_trace, 972 .cb_barrier = rcu_barrier_tasks_trace, 973 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, 974 .cbflood_max = 50000, 975 .fqs = NULL, 976 .stats = NULL, 977 .irq_capable = 1, 978 .slow_gps = 1, 979 .name = "tasks-tracing" 980 }; 981 982 #define TASKS_TRACING_OPS &tasks_tracing_ops, 983 984 #else // #ifdef CONFIG_TASKS_TRACE_RCU 985 986 #define TASKS_TRACING_OPS 987 988 #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU 989 990 991 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 992 { 993 if (!cur_ops->gp_diff) 994 return new - old; 995 return cur_ops->gp_diff(new, old); 996 } 997 998 /* 999 * RCU torture priority-boost testing. Runs one real-time thread per 1000 * CPU for moderate bursts, repeatedly starting grace periods and waiting 1001 * for them to complete. If a given grace period takes too long, we assume 1002 * that priority inversion has occurred. 1003 */ 1004 1005 static int old_rt_runtime = -1; 1006 1007 static void rcu_torture_disable_rt_throttle(void) 1008 { 1009 /* 1010 * Disable RT throttling so that rcutorture's boost threads don't get 1011 * throttled. Only possible if rcutorture is built-in otherwise the 1012 * user should manually do this by setting the sched_rt_period_us and 1013 * sched_rt_runtime sysctls. 1014 */ 1015 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 1016 return; 1017 1018 old_rt_runtime = sysctl_sched_rt_runtime; 1019 sysctl_sched_rt_runtime = -1; 1020 } 1021 1022 static void rcu_torture_enable_rt_throttle(void) 1023 { 1024 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 1025 return; 1026 1027 sysctl_sched_rt_runtime = old_rt_runtime; 1028 old_rt_runtime = -1; 1029 } 1030 1031 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) 1032 { 1033 int cpu; 1034 static int dbg_done; 1035 unsigned long end = jiffies; 1036 bool gp_done; 1037 unsigned long j; 1038 static unsigned long last_persist; 1039 unsigned long lp; 1040 unsigned long mininterval = test_boost_duration * HZ - HZ / 2; 1041 1042 if (end - *start > mininterval) { 1043 // Recheck after checking time to avoid false positives. 1044 smp_mb(); // Time check before grace-period check. 1045 if (cur_ops->poll_gp_state(gp_state)) 1046 return false; // passed, though perhaps just barely 1047 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { 1048 // At most one persisted message per boost test. 1049 j = jiffies; 1050 lp = READ_ONCE(last_persist); 1051 if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) 1052 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); 1053 return false; // passed on a technicality 1054 } 1055 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 1056 n_rcu_torture_boost_failure++; 1057 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { 1058 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", 1059 current->rt_priority, gp_state, end - *start); 1060 cur_ops->gp_kthread_dbg(); 1061 // Recheck after print to flag grace period ending during splat. 1062 gp_done = cur_ops->poll_gp_state(gp_state); 1063 pr_info("Boost inversion: GP %lu %s.\n", gp_state, 1064 gp_done ? 
"ended already" : "still pending"); 1065 1066 } 1067 1068 return true; // failed 1069 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { 1070 *start = jiffies; 1071 } 1072 1073 return false; // passed 1074 } 1075 1076 static int rcu_torture_boost(void *arg) 1077 { 1078 unsigned long endtime; 1079 unsigned long gp_state; 1080 unsigned long gp_state_time; 1081 unsigned long oldstarttime; 1082 1083 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 1084 1085 /* Set real-time priority. */ 1086 sched_set_fifo_low(current); 1087 1088 /* Each pass through the following loop does one boost-test cycle. */ 1089 do { 1090 bool failed = false; // Test failed already in this test interval 1091 bool gp_initiated = false; 1092 1093 if (kthread_should_stop()) 1094 goto checkwait; 1095 1096 /* Wait for the next test interval. */ 1097 oldstarttime = READ_ONCE(boost_starttime); 1098 while (time_before(jiffies, oldstarttime)) { 1099 schedule_timeout_interruptible(oldstarttime - jiffies); 1100 if (stutter_wait("rcu_torture_boost")) 1101 sched_set_fifo_low(current); 1102 if (torture_must_stop()) 1103 goto checkwait; 1104 } 1105 1106 // Do one boost-test interval. 1107 endtime = oldstarttime + test_boost_duration * HZ; 1108 while (time_before(jiffies, endtime)) { 1109 // Has current GP gone too long? 1110 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1111 failed = rcu_torture_boost_failed(gp_state, &gp_state_time); 1112 // If we don't have a grace period in flight, start one. 1113 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { 1114 gp_state = cur_ops->start_gp_poll(); 1115 gp_initiated = true; 1116 gp_state_time = jiffies; 1117 } 1118 if (stutter_wait("rcu_torture_boost")) { 1119 sched_set_fifo_low(current); 1120 // If the grace period already ended, 1121 // we don't know when that happened, so 1122 // start over. 1123 if (cur_ops->poll_gp_state(gp_state)) 1124 gp_initiated = false; 1125 } 1126 if (torture_must_stop()) 1127 goto checkwait; 1128 } 1129 1130 // In case the grace period extended beyond the end of the loop. 1131 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1132 rcu_torture_boost_failed(gp_state, &gp_state_time); 1133 1134 /* 1135 * Set the start time of the next test interval. 1136 * Yes, this is vulnerable to long delays, but such 1137 * delays simply cause a false negative for the next 1138 * interval. Besides, we are running at RT priority, 1139 * so delays should be relatively rare. 1140 */ 1141 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { 1142 if (mutex_trylock(&boost_mutex)) { 1143 if (oldstarttime == boost_starttime) { 1144 WRITE_ONCE(boost_starttime, 1145 jiffies + test_boost_interval * HZ); 1146 n_rcu_torture_boosts++; 1147 } 1148 mutex_unlock(&boost_mutex); 1149 break; 1150 } 1151 schedule_timeout_uninterruptible(1); 1152 } 1153 1154 /* Go do the stutter. */ 1155 checkwait: if (stutter_wait("rcu_torture_boost")) 1156 sched_set_fifo_low(current); 1157 } while (!torture_must_stop()); 1158 1159 /* Clean up and exit. */ 1160 while (!kthread_should_stop()) { 1161 torture_shutdown_absorb("rcu_torture_boost"); 1162 schedule_timeout_uninterruptible(1); 1163 } 1164 torture_kthread_stopping("rcu_torture_boost"); 1165 return 0; 1166 } 1167 1168 /* 1169 * RCU torture force-quiescent-state kthread. Repeatedly induces 1170 * bursts of calls to force_quiescent_state(), increasing the probability 1171 * of occurrence of some important types of race conditions. 
1172 */ 1173 static int 1174 rcu_torture_fqs(void *arg) 1175 { 1176 unsigned long fqs_resume_time; 1177 int fqs_burst_remaining; 1178 int oldnice = task_nice(current); 1179 1180 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1181 do { 1182 fqs_resume_time = jiffies + fqs_stutter * HZ; 1183 while (time_before(jiffies, fqs_resume_time) && 1184 !kthread_should_stop()) { 1185 schedule_timeout_interruptible(1); 1186 } 1187 fqs_burst_remaining = fqs_duration; 1188 while (fqs_burst_remaining > 0 && 1189 !kthread_should_stop()) { 1190 cur_ops->fqs(); 1191 udelay(fqs_holdoff); 1192 fqs_burst_remaining -= fqs_holdoff; 1193 } 1194 if (stutter_wait("rcu_torture_fqs")) 1195 sched_set_normal(current, oldnice); 1196 } while (!torture_must_stop()); 1197 torture_kthread_stopping("rcu_torture_fqs"); 1198 return 0; 1199 } 1200 1201 // Used by writers to randomly choose from the available grace-period primitives. 1202 static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; 1203 static int nsynctypes; 1204 1205 /* 1206 * Determine which grace-period primitives are available. 1207 */ 1208 static void rcu_torture_write_types(void) 1209 { 1210 bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full; 1211 bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp; 1212 bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll; 1213 bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync; 1214 1215 /* Initialize synctype[] array. If none set, take default. */ 1216 if (!gp_cond1 && 1217 !gp_cond_exp1 && 1218 !gp_cond_full1 && 1219 !gp_cond_exp_full1 && 1220 !gp_exp1 && 1221 !gp_poll_exp1 && 1222 !gp_poll_exp_full1 && 1223 !gp_normal1 && 1224 !gp_poll1 && 1225 !gp_poll_full1 && 1226 !gp_sync1) { 1227 gp_cond1 = true; 1228 gp_cond_exp1 = true; 1229 gp_cond_full1 = true; 1230 gp_cond_exp_full1 = true; 1231 gp_exp1 = true; 1232 gp_poll_exp1 = true; 1233 gp_poll_exp_full1 = true; 1234 gp_normal1 = true; 1235 gp_poll1 = true; 1236 gp_poll_full1 = true; 1237 gp_sync1 = true; 1238 } 1239 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { 1240 synctype[nsynctypes++] = RTWS_COND_GET; 1241 pr_info("%s: Testing conditional GPs.\n", __func__); 1242 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { 1243 pr_alert("%s: gp_cond without primitives.\n", __func__); 1244 } 1245 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { 1246 synctype[nsynctypes++] = RTWS_COND_GET_EXP; 1247 pr_info("%s: Testing conditional expedited GPs.\n", __func__); 1248 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { 1249 pr_alert("%s: gp_cond_exp without primitives.\n", __func__); 1250 } 1251 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) { 1252 synctype[nsynctypes++] = RTWS_COND_GET_FULL; 1253 pr_info("%s: Testing conditional full-state GPs.\n", __func__); 1254 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) { 1255 pr_alert("%s: gp_cond_full without primitives.\n", __func__); 1256 } 1257 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) { 1258 synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL; 1259 pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__); 1260 } else if (gp_cond_exp_full && 1261 (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) { 1262 pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__); 1263 } 1264 if (gp_exp1 && 
cur_ops->exp_sync) { 1265 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1266 pr_info("%s: Testing expedited GPs.\n", __func__); 1267 } else if (gp_exp && !cur_ops->exp_sync) { 1268 pr_alert("%s: gp_exp without primitives.\n", __func__); 1269 } 1270 if (gp_normal1 && cur_ops->deferred_free) { 1271 synctype[nsynctypes++] = RTWS_DEF_FREE; 1272 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1273 } else if (gp_normal && !cur_ops->deferred_free) { 1274 pr_alert("%s: gp_normal without primitives.\n", __func__); 1275 } 1276 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state && 1277 cur_ops->start_gp_poll && cur_ops->poll_gp_state) { 1278 synctype[nsynctypes++] = RTWS_POLL_GET; 1279 pr_info("%s: Testing polling GPs.\n", __func__); 1280 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { 1281 pr_alert("%s: gp_poll without primitives.\n", __func__); 1282 } 1283 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full 1284 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) { 1285 synctype[nsynctypes++] = RTWS_POLL_GET_FULL; 1286 pr_info("%s: Testing polling full-state GPs.\n", __func__); 1287 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) { 1288 pr_alert("%s: gp_poll_full without primitives.\n", __func__); 1289 } 1290 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { 1291 synctype[nsynctypes++] = RTWS_POLL_GET_EXP; 1292 pr_info("%s: Testing polling expedited GPs.\n", __func__); 1293 } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { 1294 pr_alert("%s: gp_poll_exp without primitives.\n", __func__); 1295 } 1296 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) { 1297 synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL; 1298 pr_info("%s: Testing polling full-state expedited GPs.\n", __func__); 1299 } else if (gp_poll_exp_full && 1300 (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) { 1301 pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__); 1302 } 1303 if (gp_sync1 && cur_ops->sync) { 1304 synctype[nsynctypes++] = RTWS_SYNC; 1305 pr_info("%s: Testing normal GPs.\n", __func__); 1306 } else if (gp_sync && !cur_ops->sync) { 1307 pr_alert("%s: gp_sync without primitives.\n", __func__); 1308 } 1309 } 1310 1311 /* 1312 * Do the specified rcu_torture_writer() synchronous grace period, 1313 * while also testing out the polled APIs. Note well that the single-CPU 1314 * grace-period optimizations must be accounted for. 
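 *
 * Roughly, the pattern exercised below is:
 *
 *	cookie = cur_ops->get_gp_state();
 *	sync();		// Twice if ->poll_need_2gp() says so.
 *	WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
 *
 * and likewise for the full-state (rcu_gp_oldstate) cookie variant.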
1315 */ 1316 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void)) 1317 { 1318 unsigned long cookie; 1319 struct rcu_gp_oldstate cookie_full; 1320 bool dopoll; 1321 bool dopoll_full; 1322 unsigned long r = torture_random(trsp); 1323 1324 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); 1325 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); 1326 if (dopoll || dopoll_full) 1327 cpus_read_lock(); 1328 if (dopoll) 1329 cookie = cur_ops->get_gp_state(); 1330 if (dopoll_full) 1331 cur_ops->get_gp_state_full(&cookie_full); 1332 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) 1333 sync(); 1334 sync(); 1335 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), 1336 "%s: Cookie check 3 failed %pS() online %*pbl.", 1337 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1338 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), 1339 "%s: Cookie check 4 failed %pS() online %*pbl", 1340 __func__, sync, cpumask_pr_args(cpu_online_mask)); 1341 if (dopoll || dopoll_full) 1342 cpus_read_unlock(); 1343 } 1344 1345 /* 1346 * RCU torture writer kthread. Repeatedly substitutes a new structure 1347 * for that pointed to by rcu_torture_current, freeing the old structure 1348 * after a series of grace periods (the "pipeline"). 1349 */ 1350 static int 1351 rcu_torture_writer(void *arg) 1352 { 1353 bool boot_ended; 1354 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1355 unsigned long cookie; 1356 struct rcu_gp_oldstate cookie_full; 1357 int expediting = 0; 1358 unsigned long gp_snap; 1359 unsigned long gp_snap1; 1360 struct rcu_gp_oldstate gp_snap_full; 1361 struct rcu_gp_oldstate gp_snap1_full; 1362 int i; 1363 int idx; 1364 int oldnice = task_nice(current); 1365 struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE]; 1366 struct rcu_torture *rp; 1367 struct rcu_torture *old_rp; 1368 static DEFINE_TORTURE_RANDOM(rand); 1369 bool stutter_waited; 1370 unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE]; 1371 1372 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1373 if (!can_expedite) 1374 pr_alert("%s" TORTURE_FLAG 1375 " GP expediting controlled from boot/sysfs for %s.\n", 1376 torture_type, cur_ops->name); 1377 if (WARN_ONCE(nsynctypes == 0, 1378 "%s: No update-side primitives.\n", __func__)) { 1379 /* 1380 * No updates primitives, so don't try updating. 1381 * The resulting test won't be testing much, hence the 1382 * above WARN_ONCE(). 1383 */ 1384 rcu_torture_writer_state = RTWS_STOPPING; 1385 torture_kthread_stopping("rcu_torture_writer"); 1386 return 0; 1387 } 1388 1389 do { 1390 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1391 torture_hrtimeout_us(500, 1000, &rand); 1392 rp = rcu_torture_alloc(); 1393 if (rp == NULL) 1394 continue; 1395 rp->rtort_pipe_count = 0; 1396 rcu_torture_writer_state = RTWS_DELAY; 1397 udelay(torture_random(&rand) & 0x3ff); 1398 rcu_torture_writer_state = RTWS_REPLACE; 1399 old_rp = rcu_dereference_check(rcu_torture_current, 1400 current == writer_task); 1401 rp->rtort_mbtest = 1; 1402 rcu_assign_pointer(rcu_torture_current, rp); 1403 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1404 if (old_rp) { 1405 i = old_rp->rtort_pipe_count; 1406 if (i > RCU_TORTURE_PIPE_LEN) 1407 i = RCU_TORTURE_PIPE_LEN; 1408 atomic_inc(&rcu_torture_wcount[i]); 1409 WRITE_ONCE(old_rp->rtort_pipe_count, 1410 old_rp->rtort_pipe_count + 1); 1411 1412 // Make sure readers block polled grace periods. 
1413 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1414 idx = cur_ops->readlock(); 1415 cookie = cur_ops->get_gp_state(); 1416 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1417 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1418 __func__, 1419 rcu_torture_writer_state_getname(), 1420 rcu_torture_writer_state, 1421 cookie, cur_ops->get_gp_state()); 1422 if (cur_ops->get_gp_completed) { 1423 cookie = cur_ops->get_gp_completed(); 1424 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); 1425 } 1426 cur_ops->readunlock(idx); 1427 } 1428 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { 1429 idx = cur_ops->readlock(); 1430 cur_ops->get_gp_state_full(&cookie_full); 1431 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 1432 "%s: Cookie check 5 failed %s(%d) online %*pbl\n", 1433 __func__, 1434 rcu_torture_writer_state_getname(), 1435 rcu_torture_writer_state, 1436 cpumask_pr_args(cpu_online_mask)); 1437 if (cur_ops->get_gp_completed_full) { 1438 cur_ops->get_gp_completed_full(&cookie_full); 1439 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); 1440 } 1441 cur_ops->readunlock(idx); 1442 } 1443 switch (synctype[torture_random(&rand) % nsynctypes]) { 1444 case RTWS_DEF_FREE: 1445 rcu_torture_writer_state = RTWS_DEF_FREE; 1446 cur_ops->deferred_free(old_rp); 1447 break; 1448 case RTWS_EXP_SYNC: 1449 rcu_torture_writer_state = RTWS_EXP_SYNC; 1450 do_rtws_sync(&rand, cur_ops->exp_sync); 1451 rcu_torture_pipe_update(old_rp); 1452 break; 1453 case RTWS_COND_GET: 1454 rcu_torture_writer_state = RTWS_COND_GET; 1455 gp_snap = cur_ops->get_gp_state(); 1456 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1457 rcu_torture_writer_state = RTWS_COND_SYNC; 1458 cur_ops->cond_sync(gp_snap); 1459 rcu_torture_pipe_update(old_rp); 1460 break; 1461 case RTWS_COND_GET_EXP: 1462 rcu_torture_writer_state = RTWS_COND_GET_EXP; 1463 gp_snap = cur_ops->get_gp_state_exp(); 1464 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1465 rcu_torture_writer_state = RTWS_COND_SYNC_EXP; 1466 cur_ops->cond_sync_exp(gp_snap); 1467 rcu_torture_pipe_update(old_rp); 1468 break; 1469 case RTWS_COND_GET_FULL: 1470 rcu_torture_writer_state = RTWS_COND_GET_FULL; 1471 cur_ops->get_gp_state_full(&gp_snap_full); 1472 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1473 rcu_torture_writer_state = RTWS_COND_SYNC_FULL; 1474 cur_ops->cond_sync_full(&gp_snap_full); 1475 rcu_torture_pipe_update(old_rp); 1476 break; 1477 case RTWS_COND_GET_EXP_FULL: 1478 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL; 1479 cur_ops->get_gp_state_full(&gp_snap_full); 1480 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1481 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL; 1482 cur_ops->cond_sync_exp_full(&gp_snap_full); 1483 rcu_torture_pipe_update(old_rp); 1484 break; 1485 case RTWS_POLL_GET: 1486 rcu_torture_writer_state = RTWS_POLL_GET; 1487 for (i = 0; i < ARRAY_SIZE(ulo); i++) 1488 ulo[i] = cur_ops->get_comp_state(); 1489 gp_snap = cur_ops->start_gp_poll(); 1490 rcu_torture_writer_state = RTWS_POLL_WAIT; 1491 while (!cur_ops->poll_gp_state(gp_snap)) { 1492 gp_snap1 = cur_ops->get_gp_state(); 1493 for (i = 0; i < ARRAY_SIZE(ulo); i++) 1494 if (cur_ops->poll_gp_state(ulo[i]) || 1495 cur_ops->same_gp_state(ulo[i], gp_snap1)) { 1496 ulo[i] = gp_snap1; 1497 break; 1498 } 1499 WARN_ON_ONCE(i >= ARRAY_SIZE(ulo)); 1500 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1501 &rand); 1502 } 1503 rcu_torture_pipe_update(old_rp); 1504 break; 1505 case RTWS_POLL_GET_FULL: 1506 
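				// As for RTWS_POLL_GET above, but using full-state
				// rcu_gp_oldstate cookies via ->start_gp_poll_full()
				// and ->poll_gp_state_full().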
rcu_torture_writer_state = RTWS_POLL_GET_FULL; 1507 for (i = 0; i < ARRAY_SIZE(rgo); i++) 1508 cur_ops->get_comp_state_full(&rgo[i]); 1509 cur_ops->start_gp_poll_full(&gp_snap_full); 1510 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; 1511 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1512 cur_ops->get_gp_state_full(&gp_snap1_full); 1513 for (i = 0; i < ARRAY_SIZE(rgo); i++) 1514 if (cur_ops->poll_gp_state_full(&rgo[i]) || 1515 cur_ops->same_gp_state_full(&rgo[i], 1516 &gp_snap1_full)) { 1517 rgo[i] = gp_snap1_full; 1518 break; 1519 } 1520 WARN_ON_ONCE(i >= ARRAY_SIZE(rgo)); 1521 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1522 &rand); 1523 } 1524 rcu_torture_pipe_update(old_rp); 1525 break; 1526 case RTWS_POLL_GET_EXP: 1527 rcu_torture_writer_state = RTWS_POLL_GET_EXP; 1528 gp_snap = cur_ops->start_gp_poll_exp(); 1529 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; 1530 while (!cur_ops->poll_gp_state_exp(gp_snap)) 1531 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1532 &rand); 1533 rcu_torture_pipe_update(old_rp); 1534 break; 1535 case RTWS_POLL_GET_EXP_FULL: 1536 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL; 1537 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1538 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; 1539 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) 1540 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1541 &rand); 1542 rcu_torture_pipe_update(old_rp); 1543 break; 1544 case RTWS_SYNC: 1545 rcu_torture_writer_state = RTWS_SYNC; 1546 do_rtws_sync(&rand, cur_ops->sync); 1547 rcu_torture_pipe_update(old_rp); 1548 break; 1549 default: 1550 WARN_ON_ONCE(1); 1551 break; 1552 } 1553 } 1554 WRITE_ONCE(rcu_torture_current_version, 1555 rcu_torture_current_version + 1); 1556 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1557 if (can_expedite && 1558 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1559 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1560 if (expediting >= 0) 1561 rcu_expedite_gp(); 1562 else 1563 rcu_unexpedite_gp(); 1564 if (++expediting > 3) 1565 expediting = -expediting; 1566 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1567 can_expedite = !rcu_gp_is_expedited() && 1568 !rcu_gp_is_normal(); 1569 } 1570 rcu_torture_writer_state = RTWS_STUTTER; 1571 boot_ended = rcu_inkernel_boot_has_ended(); 1572 stutter_waited = stutter_wait("rcu_torture_writer"); 1573 if (stutter_waited && 1574 !atomic_read(&rcu_fwd_cb_nodelay) && 1575 !cur_ops->slow_gps && 1576 !torture_must_stop() && 1577 boot_ended) 1578 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1579 if (list_empty(&rcu_tortures[i].rtort_free) && 1580 rcu_access_pointer(rcu_torture_current) != 1581 &rcu_tortures[i]) { 1582 tracing_off(); 1583 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1584 rcu_ftrace_dump(DUMP_ALL); 1585 } 1586 if (stutter_waited) 1587 sched_set_normal(current, oldnice); 1588 } while (!torture_must_stop()); 1589 rcu_torture_current = NULL; // Let stats task know that we are done. 1590 /* Reset expediting back to unexpedited. 
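	   During the run, "expediting" counted 1..3 nested rcu_expedite_gp()
	   calls and then went negative to unwind them, so pop any remaining
	   nesting with rcu_unexpedite_gp() before exiting.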
*/ 1591 if (expediting > 0) 1592 expediting = -expediting; 1593 while (can_expedite && expediting++ < 0) 1594 rcu_unexpedite_gp(); 1595 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1596 if (!can_expedite) 1597 pr_alert("%s" TORTURE_FLAG 1598 " Dynamic grace-period expediting was disabled.\n", 1599 torture_type); 1600 rcu_torture_writer_state = RTWS_STOPPING; 1601 torture_kthread_stopping("rcu_torture_writer"); 1602 return 0; 1603 } 1604 1605 /* 1606 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1607 * delay between calls. 1608 */ 1609 static int 1610 rcu_torture_fakewriter(void *arg) 1611 { 1612 unsigned long gp_snap; 1613 struct rcu_gp_oldstate gp_snap_full; 1614 DEFINE_TORTURE_RANDOM(rand); 1615 1616 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1617 set_user_nice(current, MAX_NICE); 1618 1619 if (WARN_ONCE(nsynctypes == 0, 1620 "%s: No update-side primitives.\n", __func__)) { 1621 /* 1622 * No updates primitives, so don't try updating. 1623 * The resulting test won't be testing much, hence the 1624 * above WARN_ONCE(). 1625 */ 1626 torture_kthread_stopping("rcu_torture_fakewriter"); 1627 return 0; 1628 } 1629 1630 do { 1631 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1632 if (cur_ops->cb_barrier != NULL && 1633 torture_random(&rand) % (nfakewriters * 8) == 0) { 1634 cur_ops->cb_barrier(); 1635 } else { 1636 switch (synctype[torture_random(&rand) % nsynctypes]) { 1637 case RTWS_DEF_FREE: 1638 break; 1639 case RTWS_EXP_SYNC: 1640 cur_ops->exp_sync(); 1641 break; 1642 case RTWS_COND_GET: 1643 gp_snap = cur_ops->get_gp_state(); 1644 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1645 cur_ops->cond_sync(gp_snap); 1646 break; 1647 case RTWS_COND_GET_EXP: 1648 gp_snap = cur_ops->get_gp_state_exp(); 1649 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1650 cur_ops->cond_sync_exp(gp_snap); 1651 break; 1652 case RTWS_COND_GET_FULL: 1653 cur_ops->get_gp_state_full(&gp_snap_full); 1654 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1655 cur_ops->cond_sync_full(&gp_snap_full); 1656 break; 1657 case RTWS_COND_GET_EXP_FULL: 1658 cur_ops->get_gp_state_full(&gp_snap_full); 1659 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1660 cur_ops->cond_sync_exp_full(&gp_snap_full); 1661 break; 1662 case RTWS_POLL_GET: 1663 gp_snap = cur_ops->start_gp_poll(); 1664 while (!cur_ops->poll_gp_state(gp_snap)) { 1665 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1666 &rand); 1667 } 1668 break; 1669 case RTWS_POLL_GET_FULL: 1670 cur_ops->start_gp_poll_full(&gp_snap_full); 1671 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1672 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1673 &rand); 1674 } 1675 break; 1676 case RTWS_POLL_GET_EXP: 1677 gp_snap = cur_ops->start_gp_poll_exp(); 1678 while (!cur_ops->poll_gp_state_exp(gp_snap)) { 1679 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1680 &rand); 1681 } 1682 break; 1683 case RTWS_POLL_GET_EXP_FULL: 1684 cur_ops->start_gp_poll_exp_full(&gp_snap_full); 1685 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { 1686 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1687 &rand); 1688 } 1689 break; 1690 case RTWS_SYNC: 1691 cur_ops->sync(); 1692 break; 1693 default: 1694 WARN_ON_ONCE(1); 1695 break; 1696 } 1697 } 1698 stutter_wait("rcu_torture_fakewriter"); 1699 } while (!torture_must_stop()); 1700 1701 torture_kthread_stopping("rcu_torture_fakewriter"); 1702 return 0; 1703 } 1704 1705 static void 
rcu_torture_timer_cb(struct rcu_head *rhp) 1706 { 1707 kfree(rhp); 1708 } 1709 1710 // Set up and carry out testing of RCU's global memory ordering 1711 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1712 struct torture_random_state *trsp) 1713 { 1714 unsigned long loops; 1715 int noc = torture_num_online_cpus(); 1716 int rdrchked; 1717 int rdrchker; 1718 struct rcu_torture_reader_check *rtrcp; // Me. 1719 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1720 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1721 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1722 1723 if (myid < 0) 1724 return; // Don't try this from timer handlers. 1725 1726 // Increment my counter. 1727 rtrcp = &rcu_torture_reader_mbchk[myid]; 1728 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1729 1730 // Attempt to assign someone else some checking work. 1731 rdrchked = torture_random(trsp) % nrealreaders; 1732 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1733 rdrchker = torture_random(trsp) % nrealreaders; 1734 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1735 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1736 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1737 !READ_ONCE(rtp->rtort_chkp) && 1738 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1739 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1740 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1741 rtrcp->rtc_chkrdr = rdrchked; 1742 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1743 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1744 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1745 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1746 } 1747 1748 // If assigned some completed work, do it! 1749 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1750 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1751 return; // No work or work not yet ready. 1752 rdrchked = rtrcp_assigner->rtc_chkrdr; 1753 if (WARN_ON_ONCE(rdrchked < 0)) 1754 return; 1755 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1756 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1757 atomic_inc(&n_rcu_torture_mbchk_tries); 1758 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1759 atomic_inc(&n_rcu_torture_mbchk_fail); 1760 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1761 rtrcp_assigner->rtc_ready = 0; 1762 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1763 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1764 } 1765 1766 /* 1767 * Do one extension of an RCU read-side critical section using the 1768 * current reader state in readstate (set to zero for initial entry 1769 * to extended critical section), set the new state as specified by 1770 * newstate (set to zero for final exit from extended critical section), 1771 * and random-number-generator state in trsp. If this is neither the 1772 * beginning or end of the critical section and if there was actually a 1773 * change, do a ->read_delay(). 
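 *
 * For example (an illustrative sketch, not a call sequence taken from this
 * file): passing *readstate == 0 and newstate == (RCUTORTURE_RDR_RCU_1 |
 * RCUTORTURE_RDR_BH) enters an RCU reader and disables bh, and a later call
 * with newstate == 0 removes both protections, unlocking in decreasing
 * order of strength.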
1774 */ 1775 static void rcutorture_one_extend(int *readstate, int newstate, 1776 struct torture_random_state *trsp, 1777 struct rt_read_seg *rtrsp) 1778 { 1779 unsigned long flags; 1780 int idxnew1 = -1; 1781 int idxnew2 = -1; 1782 int idxold1 = *readstate; 1783 int idxold2 = idxold1; 1784 int statesnew = ~*readstate & newstate; 1785 int statesold = *readstate & ~newstate; 1786 1787 WARN_ON_ONCE(idxold2 < 0); 1788 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1789 rtrsp->rt_readstate = newstate; 1790 1791 /* First, put new protection in place to avoid critical-section gap. */ 1792 if (statesnew & RCUTORTURE_RDR_BH) 1793 local_bh_disable(); 1794 if (statesnew & RCUTORTURE_RDR_RBH) 1795 rcu_read_lock_bh(); 1796 if (statesnew & RCUTORTURE_RDR_IRQ) 1797 local_irq_disable(); 1798 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1799 preempt_disable(); 1800 if (statesnew & RCUTORTURE_RDR_SCHED) 1801 rcu_read_lock_sched(); 1802 if (statesnew & RCUTORTURE_RDR_RCU_1) 1803 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; 1804 if (statesnew & RCUTORTURE_RDR_RCU_2) 1805 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; 1806 1807 /* 1808 * Next, remove old protection, in decreasing order of strength 1809 * to avoid unlock paths that aren't safe in the stronger 1810 * context. Namely: BH can not be enabled with disabled interrupts. 1811 * Additionally PREEMPT_RT requires that BH is enabled in preemptible 1812 * context. 1813 */ 1814 if (statesold & RCUTORTURE_RDR_IRQ) 1815 local_irq_enable(); 1816 if (statesold & RCUTORTURE_RDR_PREEMPT) 1817 preempt_enable(); 1818 if (statesold & RCUTORTURE_RDR_SCHED) 1819 rcu_read_unlock_sched(); 1820 if (statesold & RCUTORTURE_RDR_BH) 1821 local_bh_enable(); 1822 if (statesold & RCUTORTURE_RDR_RBH) 1823 rcu_read_unlock_bh(); 1824 if (statesold & RCUTORTURE_RDR_RCU_2) { 1825 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); 1826 WARN_ON_ONCE(idxnew2 != -1); 1827 idxold2 = 0; 1828 } 1829 if (statesold & RCUTORTURE_RDR_RCU_1) { 1830 bool lockit; 1831 1832 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 1833 if (lockit) 1834 raw_spin_lock_irqsave(&current->pi_lock, flags); 1835 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); 1836 WARN_ON_ONCE(idxnew1 != -1); 1837 idxold1 = 0; 1838 if (lockit) 1839 raw_spin_unlock_irqrestore(&current->pi_lock, flags); 1840 } 1841 1842 /* Delay if neither beginning nor end and there was a change. */ 1843 if ((statesnew || statesold) && *readstate && newstate) 1844 cur_ops->read_delay(trsp, rtrsp); 1845 1846 /* Update the reader state. */ 1847 if (idxnew1 == -1) 1848 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 1849 WARN_ON_ONCE(idxnew1 < 0); 1850 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1)) 1851 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1); 1852 if (idxnew2 == -1) 1853 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 1854 WARN_ON_ONCE(idxnew2 < 0); 1855 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1856 *readstate = idxnew1 | idxnew2 | newstate; 1857 WARN_ON_ONCE(*readstate < 0); 1858 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1)) 1859 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2); 1860 } 1861 1862 /* Return the biggest extendables mask given current RCU and boot parameters.
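 * The RCUTORTURE_RDR_RCU_1 and RCUTORTURE_RDR_RCU_2 bits are unconditionally
 * included in the returned mask.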
*/ 1863 static int rcutorture_extend_mask_max(void) 1864 { 1865 int mask; 1866 1867 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1868 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1869 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 1870 return mask; 1871 } 1872 1873 /* Return a random protection state mask, but with at least one bit set. */ 1874 static int 1875 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1876 { 1877 int mask = rcutorture_extend_mask_max(); 1878 unsigned long randmask1 = torture_random(trsp) >> 8; 1879 unsigned long randmask2 = randmask1 >> 3; 1880 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 1881 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 1882 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1883 1884 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); 1885 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1886 if (!(randmask1 & 0x7)) 1887 mask = mask & randmask2; 1888 else 1889 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1890 1891 // Can't have nested RCU reader without outer RCU reader. 1892 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 1893 if (oldmask & RCUTORTURE_RDR_RCU_1) 1894 mask &= ~RCUTORTURE_RDR_RCU_2; 1895 else 1896 mask |= RCUTORTURE_RDR_RCU_1; 1897 } 1898 1899 /* 1900 * Can't enable bh w/irq disabled. 1901 */ 1902 if (mask & RCUTORTURE_RDR_IRQ) 1903 mask |= oldmask & bhs; 1904 1905 /* 1906 * Ideally these sequences would be detected in debug builds 1907 * (regardless of RT), but until then don't stop testing 1908 * them on non-RT. 1909 */ 1910 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1911 /* Can't modify BH in atomic context */ 1912 if (oldmask & preempts_irq) 1913 mask &= ~bhs; 1914 if ((oldmask | mask) & preempts_irq) 1915 mask |= oldmask & bhs; 1916 } 1917 1918 return mask ?: RCUTORTURE_RDR_RCU_1; 1919 } 1920 1921 /* 1922 * Do a randomly selected number of extensions of an existing RCU read-side 1923 * critical section. 1924 */ 1925 static struct rt_read_seg * 1926 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1927 struct rt_read_seg *rtrsp) 1928 { 1929 int i; 1930 int j; 1931 int mask = rcutorture_extend_mask_max(); 1932 1933 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1934 if (!((mask - 1) & mask)) 1935 return rtrsp; /* Current RCU reader not extendable. */ 1936 /* Bias towards larger numbers of loops. */ 1937 i = (torture_random(trsp) >> 3); 1938 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1939 for (j = 0; j < i; j++) { 1940 mask = rcutorture_extend_mask(*readstate, trsp); 1941 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1942 } 1943 return &rtrsp[j]; 1944 } 1945 1946 /* 1947 * Do one read-side critical section, returning false if there was 1948 * no data to read. Can be invoked both from process context and 1949 * from a timer handler. 
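 * A non-negative myid identifies a reader kthread for the memory-ordering
 * checks; callers that cannot participate in those checks (for example the
 * timer handler) pass a negative myid, which causes
 * rcu_torture_reader_do_mbchk() to return early.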
1950 */ 1951 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1952 { 1953 bool checkpolling = !(torture_random(trsp) & 0xfff); 1954 unsigned long cookie; 1955 struct rcu_gp_oldstate cookie_full; 1956 int i; 1957 unsigned long started; 1958 unsigned long completed; 1959 int newstate; 1960 struct rcu_torture *p; 1961 int pipe_count; 1962 int readstate = 0; 1963 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1964 struct rt_read_seg *rtrsp = &rtseg[0]; 1965 struct rt_read_seg *rtrsp1; 1966 unsigned long long ts; 1967 1968 WARN_ON_ONCE(!rcu_is_watching()); 1969 newstate = rcutorture_extend_mask(readstate, trsp); 1970 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1971 if (checkpolling) { 1972 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1973 cookie = cur_ops->get_gp_state(); 1974 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 1975 cur_ops->get_gp_state_full(&cookie_full); 1976 } 1977 started = cur_ops->get_gp_seq(); 1978 ts = rcu_trace_clock_local(); 1979 p = rcu_dereference_check(rcu_torture_current, 1980 !cur_ops->readlock_held || cur_ops->readlock_held()); 1981 if (p == NULL) { 1982 /* Wait for rcu_torture_writer to get underway */ 1983 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1984 return false; 1985 } 1986 if (p->rtort_mbtest == 0) 1987 atomic_inc(&n_rcu_torture_mberror); 1988 rcu_torture_reader_do_mbchk(myid, p, trsp); 1989 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1990 preempt_disable(); 1991 pipe_count = READ_ONCE(p->rtort_pipe_count); 1992 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1993 /* Should not happen, but... */ 1994 pipe_count = RCU_TORTURE_PIPE_LEN; 1995 } 1996 completed = cur_ops->get_gp_seq(); 1997 if (pipe_count > 1) { 1998 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1999 ts, started, completed); 2000 rcu_ftrace_dump(DUMP_ALL); 2001 } 2002 __this_cpu_inc(rcu_torture_count[pipe_count]); 2003 completed = rcutorture_seq_diff(completed, started); 2004 if (completed > RCU_TORTURE_PIPE_LEN) { 2005 /* Should not happen, but... */ 2006 completed = RCU_TORTURE_PIPE_LEN; 2007 } 2008 __this_cpu_inc(rcu_torture_batch[completed]); 2009 preempt_enable(); 2010 if (checkpolling) { 2011 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 2012 WARN_ONCE(cur_ops->poll_gp_state(cookie), 2013 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 2014 __func__, 2015 rcu_torture_writer_state_getname(), 2016 rcu_torture_writer_state, 2017 cookie, cur_ops->get_gp_state()); 2018 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) 2019 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), 2020 "%s: Cookie check 6 failed %s(%d) online %*pbl\n", 2021 __func__, 2022 rcu_torture_writer_state_getname(), 2023 rcu_torture_writer_state, 2024 cpumask_pr_args(cpu_online_mask)); 2025 } 2026 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 2027 WARN_ON_ONCE(readstate); 2028 // This next splat is expected behavior if leakpointer, especially 2029 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 2030 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 2031 2032 /* If error or close call, record the sequence of reader protections. 
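 * Only the first such event is captured, courtesy of the xchg() of
 * err_segs_recorded below.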
*/ 2033 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 2034 i = 0; 2035 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 2036 err_segs[i++] = *rtrsp1; 2037 rt_read_nsegs = i; 2038 } 2039 2040 return true; 2041 } 2042 2043 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 2044 2045 /* 2046 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 2047 * incrementing the corresponding element of the pipeline array. The 2048 * counter in the element should never be greater than 1, otherwise, the 2049 * RCU implementation is broken. 2050 */ 2051 static void rcu_torture_timer(struct timer_list *unused) 2052 { 2053 atomic_long_inc(&n_rcu_torture_timers); 2054 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 2055 2056 /* Test call_rcu() invocation from interrupt handler. */ 2057 if (cur_ops->call) { 2058 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 2059 2060 if (rhp) 2061 cur_ops->call(rhp, rcu_torture_timer_cb); 2062 } 2063 } 2064 2065 /* 2066 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 2067 * incrementing the corresponding element of the pipeline array. The 2068 * counter in the element should never be greater than 1, otherwise, the 2069 * RCU implementation is broken. 2070 */ 2071 static int 2072 rcu_torture_reader(void *arg) 2073 { 2074 unsigned long lastsleep = jiffies; 2075 long myid = (long)arg; 2076 int mynumonline = myid; 2077 DEFINE_TORTURE_RANDOM(rand); 2078 struct timer_list t; 2079 2080 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 2081 set_user_nice(current, MAX_NICE); 2082 if (irqreader && cur_ops->irq_capable) 2083 timer_setup_on_stack(&t, rcu_torture_timer, 0); 2084 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2085 do { 2086 if (irqreader && cur_ops->irq_capable) { 2087 if (!timer_pending(&t)) 2088 mod_timer(&t, jiffies + 1); 2089 } 2090 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 2091 schedule_timeout_interruptible(HZ); 2092 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 2093 torture_hrtimeout_us(500, 1000, &rand); 2094 lastsleep = jiffies + 10; 2095 } 2096 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 2097 schedule_timeout_interruptible(HZ / 5); 2098 stutter_wait("rcu_torture_reader"); 2099 } while (!torture_must_stop()); 2100 if (irqreader && cur_ops->irq_capable) { 2101 del_timer_sync(&t); 2102 destroy_timer_on_stack(&t); 2103 } 2104 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2105 torture_kthread_stopping("rcu_torture_reader"); 2106 return 0; 2107 } 2108 2109 /* 2110 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 2111 * increase race probabilities and fuzzes the interval between toggling. 
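 * The base interval comes from the nocbs_toggle module parameter, with up to
 * roughly one eighth of that interval of random fuzz added to each delay.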
2112 */ 2113 static int rcu_nocb_toggle(void *arg) 2114 { 2115 int cpu; 2116 int maxcpu = -1; 2117 int oldnice = task_nice(current); 2118 long r; 2119 DEFINE_TORTURE_RANDOM(rand); 2120 ktime_t toggle_delay; 2121 unsigned long toggle_fuzz; 2122 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 2123 2124 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 2125 while (!rcu_inkernel_boot_has_ended()) 2126 schedule_timeout_interruptible(HZ / 10); 2127 for_each_online_cpu(cpu) 2128 maxcpu = cpu; 2129 WARN_ON(maxcpu < 0); 2130 if (toggle_interval > ULONG_MAX) 2131 toggle_fuzz = ULONG_MAX >> 3; 2132 else 2133 toggle_fuzz = toggle_interval >> 3; 2134 if (toggle_fuzz <= 0) 2135 toggle_fuzz = NSEC_PER_USEC; 2136 do { 2137 r = torture_random(&rand); 2138 cpu = (r >> 4) % (maxcpu + 1); 2139 if (r & 0x1) { 2140 rcu_nocb_cpu_offload(cpu); 2141 atomic_long_inc(&n_nocb_offload); 2142 } else { 2143 rcu_nocb_cpu_deoffload(cpu); 2144 atomic_long_inc(&n_nocb_deoffload); 2145 } 2146 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 2147 set_current_state(TASK_INTERRUPTIBLE); 2148 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 2149 if (stutter_wait("rcu_nocb_toggle")) 2150 sched_set_normal(current, oldnice); 2151 } while (!torture_must_stop()); 2152 torture_kthread_stopping("rcu_nocb_toggle"); 2153 return 0; 2154 } 2155 2156 /* 2157 * Print torture statistics. Caller must ensure that there is only 2158 * one call to this function at a given time!!! This is normally 2159 * accomplished by relying on the module system to only have one copy 2160 * of the module loaded, and then by giving the rcu_torture_stats 2161 * kthread full control (or the init/cleanup functions when rcu_torture_stats 2162 * thread is not running). 2163 */ 2164 static void 2165 rcu_torture_stats_print(void) 2166 { 2167 int cpu; 2168 int i; 2169 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2170 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 2171 struct rcu_torture *rtcp; 2172 static unsigned long rtcv_snap = ULONG_MAX; 2173 static bool splatted; 2174 struct task_struct *wtp; 2175 2176 for_each_possible_cpu(cpu) { 2177 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2178 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 2179 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 2180 } 2181 } 2182 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { 2183 if (pipesummary[i] != 0) 2184 break; 2185 } 2186 2187 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2188 rtcp = rcu_access_pointer(rcu_torture_current); 2189 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 2190 rtcp, 2191 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 2192 rcu_torture_current_version, 2193 list_empty(&rcu_torture_freelist), 2194 atomic_read(&n_rcu_torture_alloc), 2195 atomic_read(&n_rcu_torture_alloc_fail), 2196 atomic_read(&n_rcu_torture_free)); 2197 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", 2198 atomic_read(&n_rcu_torture_mberror), 2199 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 2200 n_rcu_torture_barrier_error, 2201 n_rcu_torture_boost_ktrerror, 2202 n_rcu_torture_boost_rterror); 2203 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 2204 n_rcu_torture_boost_failure, 2205 n_rcu_torture_boosts, 2206 atomic_long_read(&n_rcu_torture_timers)); 2207 torture_onoff_stats(); 2208 pr_cont("barrier: %ld/%ld:%ld ", 2209 data_race(n_barrier_successes), 2210 data_race(n_barrier_attempts), 2211 data_race(n_rcu_torture_barrier_error)); 2212 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 2213 pr_cont("nocb-toggles: %ld:%ld\n", 2214 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 2215 2216 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2217 if (atomic_read(&n_rcu_torture_mberror) || 2218 atomic_read(&n_rcu_torture_mbchk_fail) || 2219 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 2220 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 2221 i > 1) { 2222 pr_cont("%s", "!!! "); 2223 atomic_inc(&n_rcu_torture_error); 2224 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 2225 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 2226 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 2227 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 2228 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 2229 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 2230 WARN_ON_ONCE(i > 1); // Too-short grace period 2231 } 2232 pr_cont("Reader Pipe: "); 2233 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2234 pr_cont(" %ld", pipesummary[i]); 2235 pr_cont("\n"); 2236 2237 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2238 pr_cont("Reader Batch: "); 2239 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2240 pr_cont(" %ld", batchsummary[i]); 2241 pr_cont("\n"); 2242 2243 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 2244 pr_cont("Free-Block Circulation: "); 2245 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2246 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 2247 } 2248 pr_cont("\n"); 2249 2250 if (cur_ops->stats) 2251 cur_ops->stats(); 2252 if (rtcv_snap == rcu_torture_current_version && 2253 rcu_access_pointer(rcu_torture_current) && 2254 !rcu_stall_is_suppressed()) { 2255 int __maybe_unused flags = 0; 2256 unsigned long __maybe_unused gp_seq = 0; 2257 2258 rcutorture_get_gp_data(cur_ops->ttype, 2259 &flags, &gp_seq); 2260 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 2261 &flags, &gp_seq); 2262 wtp = READ_ONCE(writer_task); 2263 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 2264 rcu_torture_writer_state_getname(), 2265 rcu_torture_writer_state, gp_seq, flags, 2266 wtp == NULL ? ~0U : wtp->__state, 2267 wtp == NULL ? 
-1 : (int)task_cpu(wtp)); 2268 if (!splatted && wtp) { 2269 sched_show_task(wtp); 2270 splatted = true; 2271 } 2272 if (cur_ops->gp_kthread_dbg) 2273 cur_ops->gp_kthread_dbg(); 2274 rcu_ftrace_dump(DUMP_ALL); 2275 } 2276 rtcv_snap = rcu_torture_current_version; 2277 } 2278 2279 /* 2280 * Periodically prints torture statistics, if periodic statistics printing 2281 * was specified via the stat_interval module parameter. 2282 */ 2283 static int 2284 rcu_torture_stats(void *arg) 2285 { 2286 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 2287 do { 2288 schedule_timeout_interruptible(stat_interval * HZ); 2289 rcu_torture_stats_print(); 2290 torture_shutdown_absorb("rcu_torture_stats"); 2291 } while (!torture_must_stop()); 2292 torture_kthread_stopping("rcu_torture_stats"); 2293 return 0; 2294 } 2295 2296 /* Test mem_dump_obj() and friends. */ 2297 static void rcu_torture_mem_dump_obj(void) 2298 { 2299 struct rcu_head *rhp; 2300 struct kmem_cache *kcp; 2301 static int z; 2302 2303 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 2304 if (WARN_ON_ONCE(!kcp)) 2305 return; 2306 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 2307 if (WARN_ON_ONCE(!rhp)) { 2308 kmem_cache_destroy(kcp); 2309 return; 2310 } 2311 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 2312 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 2313 mem_dump_obj(ZERO_SIZE_PTR); 2314 pr_alert("mem_dump_obj(NULL):"); 2315 mem_dump_obj(NULL); 2316 pr_alert("mem_dump_obj(%px):", &rhp); 2317 mem_dump_obj(&rhp); 2318 pr_alert("mem_dump_obj(%px):", rhp); 2319 mem_dump_obj(rhp); 2320 pr_alert("mem_dump_obj(%px):", &rhp->func); 2321 mem_dump_obj(&rhp->func); 2322 pr_alert("mem_dump_obj(%px):", &z); 2323 mem_dump_obj(&z); 2324 kmem_cache_free(kcp, rhp); 2325 kmem_cache_destroy(kcp); 2326 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2327 if (WARN_ON_ONCE(!rhp)) 2328 return; 2329 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2330 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 2331 mem_dump_obj(rhp); 2332 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 2333 mem_dump_obj(&rhp->func); 2334 kfree(rhp); 2335 rhp = vmalloc(4096); 2336 if (WARN_ON_ONCE(!rhp)) 2337 return; 2338 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 2339 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 2340 mem_dump_obj(rhp); 2341 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 2342 mem_dump_obj(&rhp->func); 2343 vfree(rhp); 2344 } 2345 2346 static void 2347 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2348 { 2349 pr_alert("%s" TORTURE_FLAG 2350 "--- %s: nreaders=%d nfakewriters=%d " 2351 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2352 "shuffle_interval=%d stutter=%d irqreader=%d " 2353 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2354 "test_boost=%d/%d test_boost_interval=%d " 2355 "test_boost_duration=%d shutdown_secs=%d " 2356 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2357 "stall_cpu_block=%d " 2358 "n_barrier_cbs=%d " 2359 "onoff_interval=%d onoff_holdoff=%d " 2360 "read_exit_delay=%d read_exit_burst=%d " 2361 "nocbs_nthreads=%d nocbs_toggle=%d\n", 2362 torture_type, tag, nrealreaders, nfakewriters, 2363 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2364 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2365 test_boost, cur_ops->can_boost, 2366 test_boost_interval, 
test_boost_duration, shutdown_secs, 2367 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2368 stall_cpu_block, 2369 n_barrier_cbs, 2370 onoff_interval, onoff_holdoff, 2371 read_exit_delay, read_exit_burst, 2372 nocbs_nthreads, nocbs_toggle); 2373 } 2374 2375 static int rcutorture_booster_cleanup(unsigned int cpu) 2376 { 2377 struct task_struct *t; 2378 2379 if (boost_tasks[cpu] == NULL) 2380 return 0; 2381 mutex_lock(&boost_mutex); 2382 t = boost_tasks[cpu]; 2383 boost_tasks[cpu] = NULL; 2384 rcu_torture_enable_rt_throttle(); 2385 mutex_unlock(&boost_mutex); 2386 2387 /* This must be outside of the mutex, otherwise deadlock! */ 2388 torture_stop_kthread(rcu_torture_boost, t); 2389 return 0; 2390 } 2391 2392 static int rcutorture_booster_init(unsigned int cpu) 2393 { 2394 int retval; 2395 2396 if (boost_tasks[cpu] != NULL) 2397 return 0; /* Already created, nothing more to do. */ 2398 2399 // Testing RCU priority boosting requires rcutorture do 2400 // some serious abuse. Counter this by running ksoftirqd 2401 // at higher priority. 2402 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 2403 struct sched_param sp; 2404 struct task_struct *t; 2405 2406 t = per_cpu(ksoftirqd, cpu); 2407 WARN_ON_ONCE(!t); 2408 sp.sched_priority = 2; 2409 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2410 } 2411 2412 /* Don't allow time recalculation while creating a new task. */ 2413 mutex_lock(&boost_mutex); 2414 rcu_torture_disable_rt_throttle(); 2415 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2416 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2417 cpu, "rcu_torture_boost_%u"); 2418 if (IS_ERR(boost_tasks[cpu])) { 2419 retval = PTR_ERR(boost_tasks[cpu]); 2420 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2421 n_rcu_torture_boost_ktrerror++; 2422 boost_tasks[cpu] = NULL; 2423 mutex_unlock(&boost_mutex); 2424 return retval; 2425 } 2426 mutex_unlock(&boost_mutex); 2427 return 0; 2428 } 2429 2430 /* 2431 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2432 * induces a CPU stall for the time specified by stall_cpu. 2433 */ 2434 static int rcu_torture_stall(void *args) 2435 { 2436 int idx; 2437 unsigned long stop_at; 2438 2439 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2440 if (stall_cpu_holdoff > 0) { 2441 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2442 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2443 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2444 } 2445 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2446 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2447 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2448 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2449 if (kthread_should_stop()) 2450 break; 2451 schedule_timeout_uninterruptible(HZ); 2452 } 2453 } 2454 if (!kthread_should_stop() && stall_cpu > 0) { 2455 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2456 stop_at = ktime_get_seconds() + stall_cpu; 2457 /* RCU CPU stall is expected behavior in following code. 
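 * Depending on the stall_cpu_irqsoff and stall_cpu_block module parameters,
 * the stall runs with interrupts disabled, with preemption disabled, or
 * while repeatedly blocking.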
*/ 2458 idx = cur_ops->readlock(); 2459 if (stall_cpu_irqsoff) 2460 local_irq_disable(); 2461 else if (!stall_cpu_block) 2462 preempt_disable(); 2463 pr_alert("%s start on CPU %d.\n", 2464 __func__, raw_smp_processor_id()); 2465 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 2466 stop_at)) 2467 if (stall_cpu_block) { 2468 #ifdef CONFIG_PREEMPTION 2469 preempt_schedule(); 2470 #else 2471 schedule_timeout_uninterruptible(HZ); 2472 #endif 2473 } else if (stall_no_softlockup) { 2474 touch_softlockup_watchdog(); 2475 } 2476 if (stall_cpu_irqsoff) 2477 local_irq_enable(); 2478 else if (!stall_cpu_block) 2479 preempt_enable(); 2480 cur_ops->readunlock(idx); 2481 } 2482 pr_alert("%s end.\n", __func__); 2483 torture_shutdown_absorb("rcu_torture_stall"); 2484 while (!kthread_should_stop()) 2485 schedule_timeout_interruptible(10 * HZ); 2486 return 0; 2487 } 2488 2489 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2490 static int __init rcu_torture_stall_init(void) 2491 { 2492 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2493 return 0; 2494 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2495 } 2496 2497 /* State structure for forward-progress self-propagating RCU callback. */ 2498 struct fwd_cb_state { 2499 struct rcu_head rh; 2500 int stop; 2501 }; 2502 2503 /* 2504 * Forward-progress self-propagating RCU callback function. Because 2505 * callbacks run from softirq, this function is an implicit RCU read-side 2506 * critical section. 2507 */ 2508 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2509 { 2510 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2511 2512 if (READ_ONCE(fcsp->stop)) { 2513 WRITE_ONCE(fcsp->stop, 2); 2514 return; 2515 } 2516 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2517 } 2518 2519 /* State for continuous-flood RCU callbacks. */ 2520 struct rcu_fwd_cb { 2521 struct rcu_head rh; 2522 struct rcu_fwd_cb *rfc_next; 2523 struct rcu_fwd *rfc_rfp; 2524 int rfc_gps; 2525 }; 2526 2527 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2528 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2529 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2530 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 2531 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2532 2533 struct rcu_launder_hist { 2534 long n_launders; 2535 unsigned long launder_gp_seq; 2536 }; 2537 2538 struct rcu_fwd { 2539 spinlock_t rcu_fwd_lock; 2540 struct rcu_fwd_cb *rcu_fwd_cb_head; 2541 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2542 long n_launders_cb; 2543 unsigned long rcu_fwd_startat; 2544 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2545 unsigned long rcu_launder_gp_seq_start; 2546 int rcu_fwd_id; 2547 }; 2548 2549 static DEFINE_MUTEX(rcu_fwd_mutex); 2550 static struct rcu_fwd *rcu_fwds; 2551 static unsigned long rcu_fwd_seq; 2552 static atomic_long_t rcu_fwd_max_cbs; 2553 static bool rcu_fwd_emergency_stop; 2554 2555 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2556 { 2557 unsigned long gps; 2558 unsigned long gps_old; 2559 int i; 2560 int j; 2561 2562 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2563 if (rfp->n_launders_hist[i].n_launders > 0) 2564 break; 2565 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2566 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2567 gps_old = rfp->rcu_launder_gp_seq_start; 2568 for (j = 0; j <= i; j++) { 2569 gps = rfp->n_launders_hist[j].launder_gp_seq; 2570 pr_cont(" %ds/%d: %ld:%ld", 2571 j + 1, FWD_CBS_HIST_DIV, 2572 rfp->n_launders_hist[j].n_launders, 2573 rcutorture_seq_diff(gps, gps_old)); 2574 gps_old = gps; 2575 } 2576 pr_cont("\n"); 2577 } 2578 2579 /* Callback function for continuous-flood RCU callbacks. */ 2580 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2581 { 2582 unsigned long flags; 2583 int i; 2584 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2585 struct rcu_fwd_cb **rfcpp; 2586 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2587 2588 rfcp->rfc_next = NULL; 2589 rfcp->rfc_gps++; 2590 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2591 rfcpp = rfp->rcu_fwd_cb_tail; 2592 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2593 WRITE_ONCE(*rfcpp, rfcp); 2594 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2595 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2596 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2597 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2598 rfp->n_launders_hist[i].n_launders++; 2599 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2600 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2601 } 2602 2603 // Give the scheduler a chance, even on nohz_full CPUs. 2604 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2605 { 2606 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2607 // Real call_rcu() floods hit userspace, so emulate that. 2608 if (need_resched() || (iter & 0xfff)) 2609 schedule(); 2610 return; 2611 } 2612 // No userspace emulation: CB invocation throttles call_rcu() 2613 cond_resched(); 2614 } 2615 2616 /* 2617 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2618 * test is over or because we hit an OOM event. 
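 * Returns the number of callbacks freed.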
2619 */ 2620 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2621 { 2622 unsigned long flags; 2623 unsigned long freed = 0; 2624 struct rcu_fwd_cb *rfcp; 2625 2626 for (;;) { 2627 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2628 rfcp = rfp->rcu_fwd_cb_head; 2629 if (!rfcp) { 2630 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2631 break; 2632 } 2633 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2634 if (!rfp->rcu_fwd_cb_head) 2635 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2636 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2637 kfree(rfcp); 2638 freed++; 2639 rcu_torture_fwd_prog_cond_resched(freed); 2640 if (tick_nohz_full_enabled()) { 2641 local_irq_save(flags); 2642 rcu_momentary_dyntick_idle(); 2643 local_irq_restore(flags); 2644 } 2645 } 2646 return freed; 2647 } 2648 2649 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2650 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2651 int *tested, int *tested_tries) 2652 { 2653 unsigned long cver; 2654 unsigned long dur; 2655 struct fwd_cb_state fcs; 2656 unsigned long gps; 2657 int idx; 2658 int sd; 2659 int sd4; 2660 bool selfpropcb = false; 2661 unsigned long stopat; 2662 static DEFINE_TORTURE_RANDOM(trs); 2663 2664 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2665 if (!cur_ops->sync) 2666 return; // Cannot do need_resched() forward progress testing without ->sync. 2667 if (cur_ops->call && cur_ops->cb_barrier) { 2668 init_rcu_head_on_stack(&fcs.rh); 2669 selfpropcb = true; 2670 } 2671 2672 /* Tight loop containing cond_resched(). */ 2673 atomic_inc(&rcu_fwd_cb_nodelay); 2674 cur_ops->sync(); /* Later readers see above write. */ 2675 if (selfpropcb) { 2676 WRITE_ONCE(fcs.stop, 0); 2677 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2678 } 2679 cver = READ_ONCE(rcu_torture_current_version); 2680 gps = cur_ops->get_gp_seq(); 2681 sd = cur_ops->stall_dur() + 1; 2682 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2683 dur = sd4 + torture_random(&trs) % (sd - sd4); 2684 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2685 stopat = rfp->rcu_fwd_startat + dur; 2686 while (time_before(jiffies, stopat) && 2687 !shutdown_time_arrived() && 2688 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2689 idx = cur_ops->readlock(); 2690 udelay(10); 2691 cur_ops->readunlock(idx); 2692 if (!fwd_progress_need_resched || need_resched()) 2693 cond_resched(); 2694 } 2695 (*tested_tries)++; 2696 if (!time_before(jiffies, stopat) && 2697 !shutdown_time_arrived() && 2698 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2699 (*tested)++; 2700 cver = READ_ONCE(rcu_torture_current_version) - cver; 2701 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2702 WARN_ON(!cver && gps < 2); 2703 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2704 rfp->rcu_fwd_id, dur, cver, gps); 2705 } 2706 if (selfpropcb) { 2707 WRITE_ONCE(fcs.stop, 1); 2708 cur_ops->sync(); /* Wait for running CB to complete. */ 2709 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2710 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2711 } 2712 2713 if (selfpropcb) { 2714 WARN_ON(READ_ONCE(fcs.stop) != 2); 2715 destroy_rcu_head_on_stack(&fcs.rh); 2716 } 2717 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2718 atomic_dec(&rcu_fwd_cb_nodelay); 2719 } 2720 2721 /* Carry out call_rcu() forward-progress testing. 
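 * This floods call_rcu() for up to MAX_FWD_CB_JIFFIES, re-posting
 * ("laundering") each callback as it is invoked, and stops early once
 * MIN_FWD_CBS_LAUNDERED callbacks have each been laundered at least
 * MIN_FWD_CB_LAUNDERS times.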
*/ 2722 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2723 { 2724 unsigned long cver; 2725 unsigned long flags; 2726 unsigned long gps; 2727 int i; 2728 long n_launders; 2729 long n_launders_cb_snap; 2730 long n_launders_sa; 2731 long n_max_cbs; 2732 long n_max_gps; 2733 struct rcu_fwd_cb *rfcp; 2734 struct rcu_fwd_cb *rfcpn; 2735 unsigned long stopat; 2736 unsigned long stoppedat; 2737 2738 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2739 if (READ_ONCE(rcu_fwd_emergency_stop)) 2740 return; /* Get out of the way quickly, no GP wait! */ 2741 if (!cur_ops->call) 2742 return; /* Can't do call_rcu() fwd prog without ->call. */ 2743 2744 /* Loop continuously posting RCU callbacks. */ 2745 atomic_inc(&rcu_fwd_cb_nodelay); 2746 cur_ops->sync(); /* Later readers see above write. */ 2747 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2748 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2749 n_launders = 0; 2750 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2751 n_launders_sa = 0; 2752 n_max_cbs = 0; 2753 n_max_gps = 0; 2754 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2755 rfp->n_launders_hist[i].n_launders = 0; 2756 cver = READ_ONCE(rcu_torture_current_version); 2757 gps = cur_ops->get_gp_seq(); 2758 rfp->rcu_launder_gp_seq_start = gps; 2759 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2760 while (time_before(jiffies, stopat) && 2761 !shutdown_time_arrived() && 2762 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2763 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2764 rfcpn = NULL; 2765 if (rfcp) 2766 rfcpn = READ_ONCE(rfcp->rfc_next); 2767 if (rfcpn) { 2768 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2769 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2770 break; 2771 rfp->rcu_fwd_cb_head = rfcpn; 2772 n_launders++; 2773 n_launders_sa++; 2774 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2775 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2776 if (WARN_ON_ONCE(!rfcp)) { 2777 schedule_timeout_interruptible(1); 2778 continue; 2779 } 2780 n_max_cbs++; 2781 n_launders_sa = 0; 2782 rfcp->rfc_gps = 0; 2783 rfcp->rfc_rfp = rfp; 2784 } else { 2785 rfcp = NULL; 2786 } 2787 if (rfcp) 2788 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2789 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2790 if (tick_nohz_full_enabled()) { 2791 local_irq_save(flags); 2792 rcu_momentary_dyntick_idle(); 2793 local_irq_restore(flags); 2794 } 2795 } 2796 stoppedat = jiffies; 2797 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2798 cver = READ_ONCE(rcu_torture_current_version) - cver; 2799 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2800 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2801 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2802 (void)rcu_torture_fwd_prog_cbfree(rfp); 2803 2804 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2805 !shutdown_time_arrived()) { 2806 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2807 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2808 __func__, 2809 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2810 n_launders + n_max_cbs - n_launders_cb_snap, 2811 n_launders, n_launders_sa, 2812 n_max_gps, n_max_cbs, cver, gps); 2813 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2814 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 
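/* rcu_fwd_mutex also excludes rcutorture_oom_notify(), which prints these same histograms. */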
2815 rcu_torture_fwd_cb_hist(rfp); 2816 mutex_unlock(&rcu_fwd_mutex); 2817 } 2818 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2819 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2820 atomic_dec(&rcu_fwd_cb_nodelay); 2821 } 2822 2823 2824 /* 2825 * OOM notifier, but this only prints diagnostic information for the 2826 * current forward-progress test. 2827 */ 2828 static int rcutorture_oom_notify(struct notifier_block *self, 2829 unsigned long notused, void *nfreed) 2830 { 2831 int i; 2832 long ncbs; 2833 struct rcu_fwd *rfp; 2834 2835 mutex_lock(&rcu_fwd_mutex); 2836 rfp = rcu_fwds; 2837 if (!rfp) { 2838 mutex_unlock(&rcu_fwd_mutex); 2839 return NOTIFY_OK; 2840 } 2841 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2842 __func__); 2843 for (i = 0; i < fwd_progress; i++) { 2844 rcu_torture_fwd_cb_hist(&rfp[i]); 2845 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2846 } 2847 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2848 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2849 ncbs = 0; 2850 for (i = 0; i < fwd_progress; i++) 2851 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2852 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2853 cur_ops->cb_barrier(); 2854 ncbs = 0; 2855 for (i = 0; i < fwd_progress; i++) 2856 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2857 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2858 cur_ops->cb_barrier(); 2859 ncbs = 0; 2860 for (i = 0; i < fwd_progress; i++) 2861 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2862 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2863 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2864 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2865 pr_info("%s returning after OOM processing.\n", __func__); 2866 mutex_unlock(&rcu_fwd_mutex); 2867 return NOTIFY_OK; 2868 } 2869 2870 static struct notifier_block rcutorture_oom_nb = { 2871 .notifier_call = rcutorture_oom_notify 2872 }; 2873 2874 /* Carry out grace-period forward-progress testing. 
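 * Kthread zero handles the holdoff sleeps and the emergency-stop reset, and
 * paces the other forward-progress kthreads by advancing rcu_fwd_seq.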
*/ 2875 static int rcu_torture_fwd_prog(void *args) 2876 { 2877 bool firsttime = true; 2878 long max_cbs; 2879 int oldnice = task_nice(current); 2880 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2881 struct rcu_fwd *rfp = args; 2882 int tested = 0; 2883 int tested_tries = 0; 2884 2885 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2886 rcu_bind_current_to_nocb(); 2887 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2888 set_user_nice(current, MAX_NICE); 2889 do { 2890 if (!rfp->rcu_fwd_id) { 2891 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2892 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2893 if (!firsttime) { 2894 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2895 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2896 } 2897 firsttime = false; 2898 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2899 } else { 2900 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2901 schedule_timeout_interruptible(1); 2902 oldseq = READ_ONCE(rcu_fwd_seq); 2903 } 2904 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2905 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2906 rcu_torture_fwd_prog_cr(rfp); 2907 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2908 (!IS_ENABLED(CONFIG_TINY_RCU) || 2909 (rcu_inkernel_boot_has_ended() && 2910 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2911 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2912 2913 /* Avoid slow periods, better to test when busy. */ 2914 if (stutter_wait("rcu_torture_fwd_prog")) 2915 sched_set_normal(current, oldnice); 2916 } while (!torture_must_stop()); 2917 /* Short runs might not contain a valid forward-progress attempt. */ 2918 if (!rfp->rcu_fwd_id) { 2919 WARN_ON(!tested && tested_tries >= 5); 2920 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2921 } 2922 torture_kthread_stopping("rcu_torture_fwd_prog"); 2923 return 0; 2924 } 2925 2926 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2927 static int __init rcu_torture_fwd_prog_init(void) 2928 { 2929 int i; 2930 int ret = 0; 2931 struct rcu_fwd *rfp; 2932 2933 if (!fwd_progress) 2934 return 0; /* Not requested, so don't do it. */ 2935 if (fwd_progress >= nr_cpu_ids) { 2936 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 2937 fwd_progress = nr_cpu_ids; 2938 } else if (fwd_progress < 0) { 2939 fwd_progress = nr_cpu_ids; 2940 } 2941 if ((!cur_ops->sync && !cur_ops->call) || 2942 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2943 cur_ops == &rcu_busted_ops) { 2944 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2945 fwd_progress = 0; 2946 return 0; 2947 } 2948 if (stall_cpu > 0) { 2949 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2950 fwd_progress = 0; 2951 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2952 return -EINVAL; /* In module, can fail back to user. */ 2953 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 2954 return 0; 2955 } 2956 if (fwd_progress_holdoff <= 0) 2957 fwd_progress_holdoff = 1; 2958 if (fwd_progress_div <= 0) 2959 fwd_progress_div = 4; 2960 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 2961 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 2962 if (!rfp || !fwd_prog_tasks) { 2963 kfree(rfp); 2964 kfree(fwd_prog_tasks); 2965 fwd_prog_tasks = NULL; 2966 fwd_progress = 0; 2967 return -ENOMEM; 2968 } 2969 for (i = 0; i < fwd_progress; i++) { 2970 spin_lock_init(&rfp[i].rcu_fwd_lock); 2971 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 2972 rfp[i].rcu_fwd_id = i; 2973 } 2974 mutex_lock(&rcu_fwd_mutex); 2975 rcu_fwds = rfp; 2976 mutex_unlock(&rcu_fwd_mutex); 2977 register_oom_notifier(&rcutorture_oom_nb); 2978 for (i = 0; i < fwd_progress; i++) { 2979 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 2980 if (ret) { 2981 fwd_progress = i; 2982 return ret; 2983 } 2984 } 2985 return 0; 2986 } 2987 2988 static void rcu_torture_fwd_prog_cleanup(void) 2989 { 2990 int i; 2991 struct rcu_fwd *rfp; 2992 2993 if (!rcu_fwds || !fwd_prog_tasks) 2994 return; 2995 for (i = 0; i < fwd_progress; i++) 2996 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 2997 unregister_oom_notifier(&rcutorture_oom_nb); 2998 mutex_lock(&rcu_fwd_mutex); 2999 rfp = rcu_fwds; 3000 rcu_fwds = NULL; 3001 mutex_unlock(&rcu_fwd_mutex); 3002 kfree(rfp); 3003 kfree(fwd_prog_tasks); 3004 fwd_prog_tasks = NULL; 3005 } 3006 3007 /* Callback function for RCU barrier testing. */ 3008 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 3009 { 3010 atomic_inc(&barrier_cbs_invoked); 3011 } 3012 3013 /* IPI handler to get callback posted on desired CPU, if online. */ 3014 static void rcu_torture_barrier1cb(void *rcu_void) 3015 { 3016 struct rcu_head *rhp = rcu_void; 3017 3018 cur_ops->call(rhp, rcu_torture_barrier_cbf); 3019 } 3020 3021 /* kthread function to register callbacks used to test RCU barriers. */ 3022 static int rcu_torture_barrier_cbs(void *arg) 3023 { 3024 long myid = (long)arg; 3025 bool lastphase = false; 3026 bool newphase; 3027 struct rcu_head rcu; 3028 3029 init_rcu_head_on_stack(&rcu); 3030 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 3031 set_user_nice(current, MAX_NICE); 3032 do { 3033 wait_event(barrier_cbs_wq[myid], 3034 (newphase = 3035 smp_load_acquire(&barrier_phase)) != lastphase || 3036 torture_must_stop()); 3037 lastphase = newphase; 3038 if (torture_must_stop()) 3039 break; 3040 /* 3041 * The above smp_load_acquire() ensures barrier_phase load 3042 * is ordered before the following ->call(). 3043 */ 3044 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 3045 &rcu, 1)) { 3046 // IPI failed, so use direct call from current CPU. 3047 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 3048 } 3049 if (atomic_dec_and_test(&barrier_cbs_count)) 3050 wake_up(&barrier_wq); 3051 } while (!torture_must_stop()); 3052 if (cur_ops->cb_barrier != NULL) 3053 cur_ops->cb_barrier(); 3054 destroy_rcu_head_on_stack(&rcu); 3055 torture_kthread_stopping("rcu_torture_barrier_cbs"); 3056 return 0; 3057 } 3058 3059 /* kthread function to drive and coordinate RCU barrier testing. */ 3060 static int rcu_torture_barrier(void *arg) 3061 { 3062 int i; 3063 3064 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 3065 do { 3066 atomic_set(&barrier_cbs_invoked, 0); 3067 atomic_set(&barrier_cbs_count, n_barrier_cbs); 3068 /* Ensure barrier_phase ordered after prior assignments. 
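 * The smp_store_release() below pairs with the smp_load_acquire() of
 * barrier_phase in rcu_torture_barrier_cbs().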
*/ 3069 smp_store_release(&barrier_phase, !barrier_phase); 3070 for (i = 0; i < n_barrier_cbs; i++) 3071 wake_up(&barrier_cbs_wq[i]); 3072 wait_event(barrier_wq, 3073 atomic_read(&barrier_cbs_count) == 0 || 3074 torture_must_stop()); 3075 if (torture_must_stop()) 3076 break; 3077 n_barrier_attempts++; 3078 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 3079 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 3080 n_rcu_torture_barrier_error++; 3081 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 3082 atomic_read(&barrier_cbs_invoked), 3083 n_barrier_cbs); 3084 WARN_ON(1); 3085 // Wait manually for the remaining callbacks 3086 i = 0; 3087 do { 3088 if (WARN_ON(i++ > HZ)) 3089 i = INT_MIN; 3090 schedule_timeout_interruptible(1); 3091 cur_ops->cb_barrier(); 3092 } while (atomic_read(&barrier_cbs_invoked) != 3093 n_barrier_cbs && 3094 !torture_must_stop()); 3095 smp_mb(); // Can't trust ordering if broken. 3096 if (!torture_must_stop()) 3097 pr_err("Recovered: barrier_cbs_invoked = %d\n", 3098 atomic_read(&barrier_cbs_invoked)); 3099 } else { 3100 n_barrier_successes++; 3101 } 3102 schedule_timeout_interruptible(HZ / 10); 3103 } while (!torture_must_stop()); 3104 torture_kthread_stopping("rcu_torture_barrier"); 3105 return 0; 3106 } 3107 3108 /* Initialize RCU barrier testing. */ 3109 static int rcu_torture_barrier_init(void) 3110 { 3111 int i; 3112 int ret; 3113 3114 if (n_barrier_cbs <= 0) 3115 return 0; 3116 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 3117 pr_alert("%s" TORTURE_FLAG 3118 " Call or barrier ops missing for %s,\n", 3119 torture_type, cur_ops->name); 3120 pr_alert("%s" TORTURE_FLAG 3121 " RCU barrier testing omitted from run.\n", 3122 torture_type); 3123 return 0; 3124 } 3125 atomic_set(&barrier_cbs_count, 0); 3126 atomic_set(&barrier_cbs_invoked, 0); 3127 barrier_cbs_tasks = 3128 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 3129 GFP_KERNEL); 3130 barrier_cbs_wq = 3131 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 3132 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 3133 return -ENOMEM; 3134 for (i = 0; i < n_barrier_cbs; i++) { 3135 init_waitqueue_head(&barrier_cbs_wq[i]); 3136 ret = torture_create_kthread(rcu_torture_barrier_cbs, 3137 (void *)(long)i, 3138 barrier_cbs_tasks[i]); 3139 if (ret) 3140 return ret; 3141 } 3142 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 3143 } 3144 3145 /* Clean up after RCU barrier testing. */ 3146 static void rcu_torture_barrier_cleanup(void) 3147 { 3148 int i; 3149 3150 torture_stop_kthread(rcu_torture_barrier, barrier_task); 3151 if (barrier_cbs_tasks != NULL) { 3152 for (i = 0; i < n_barrier_cbs; i++) 3153 torture_stop_kthread(rcu_torture_barrier_cbs, 3154 barrier_cbs_tasks[i]); 3155 kfree(barrier_cbs_tasks); 3156 barrier_cbs_tasks = NULL; 3157 } 3158 if (barrier_cbs_wq != NULL) { 3159 kfree(barrier_cbs_wq); 3160 barrier_cbs_wq = NULL; 3161 } 3162 } 3163 3164 static bool rcu_torture_can_boost(void) 3165 { 3166 static int boost_warn_once; 3167 int prio; 3168 3169 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 3170 return false; 3171 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 3172 return false; 3173 3174 prio = rcu_get_gp_kthreads_prio(); 3175 if (!prio) 3176 return false; 3177 3178 if (prio < 2) { 3179 if (boost_warn_once == 1) 3180 return false; 3181 3182 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 3183 boost_warn_once = 1; 3184 return false; 3185 } 3186 3187 return true; 3188 } 3189 3190 static bool read_exit_child_stop; 3191 static bool read_exit_child_stopped; 3192 static wait_queue_head_t read_exit_wq; 3193 3194 // Child kthread which just does an rcutorture reader and exits. 3195 static int rcu_torture_read_exit_child(void *trsp_in) 3196 { 3197 struct torture_random_state *trsp = trsp_in; 3198 3199 set_user_nice(current, MAX_NICE); 3200 // Minimize time between reading and exiting. 3201 while (!kthread_should_stop()) 3202 schedule_timeout_uninterruptible(1); 3203 (void)rcu_torture_one_read(trsp, -1); 3204 return 0; 3205 } 3206 3207 // Parent kthread which creates and destroys read-exit child kthreads. 3208 static int rcu_torture_read_exit(void *unused) 3209 { 3210 bool errexit = false; 3211 int i; 3212 struct task_struct *tsp; 3213 DEFINE_TORTURE_RANDOM(trs); 3214 3215 // Allocate and initialize. 3216 set_user_nice(current, MAX_NICE); 3217 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 3218 3219 // Each pass through this loop does one read-exit episode. 3220 do { 3221 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 3222 for (i = 0; i < read_exit_burst; i++) { 3223 if (READ_ONCE(read_exit_child_stop)) 3224 break; 3225 stutter_wait("rcu_torture_read_exit"); 3226 // Spawn child. 3227 tsp = kthread_run(rcu_torture_read_exit_child, 3228 &trs, "%s", "rcu_torture_read_exit_child"); 3229 if (IS_ERR(tsp)) { 3230 TOROUT_ERRSTRING("out of memory"); 3231 errexit = true; 3232 break; 3233 } 3234 cond_resched(); 3235 kthread_stop(tsp); 3236 n_read_exits++; 3237 } 3238 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 3239 rcu_barrier(); // Wait for task_struct free, avoid OOM. 3240 i = 0; 3241 for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) 3242 schedule_timeout_uninterruptible(HZ); 3243 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 3244 3245 // Clean up and exit. 3246 smp_store_release(&read_exit_child_stopped, true); // After reaping. 3247 smp_mb(); // Store before wakeup. 3248 wake_up(&read_exit_wq); 3249 while (!torture_must_stop()) 3250 schedule_timeout_uninterruptible(1); 3251 torture_kthread_stopping("rcu_torture_read_exit"); 3252 return 0; 3253 } 3254 3255 static int rcu_torture_read_exit_init(void) 3256 { 3257 if (read_exit_burst <= 0) 3258 return 0; 3259 init_waitqueue_head(&read_exit_wq); 3260 read_exit_child_stop = false; 3261 read_exit_child_stopped = false; 3262 return torture_create_kthread(rcu_torture_read_exit, NULL, 3263 read_exit_task); 3264 } 3265 3266 static void rcu_torture_read_exit_cleanup(void) 3267 { 3268 if (!read_exit_task) 3269 return; 3270 WRITE_ONCE(read_exit_child_stop, true); 3271 smp_mb(); // Above write before wait. 
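// Wait for rcu_torture_read_exit() to finish reaping its children before stopping it.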
3272 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 3273 torture_stop_kthread(rcutorture_read_exit, read_exit_task); 3274 } 3275 3276 static enum cpuhp_state rcutor_hp; 3277 3278 static void 3279 rcu_torture_cleanup(void) 3280 { 3281 int firsttime; 3282 int flags = 0; 3283 unsigned long gp_seq = 0; 3284 int i; 3285 3286 if (torture_cleanup_begin()) { 3287 if (cur_ops->cb_barrier != NULL) { 3288 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3289 cur_ops->cb_barrier(); 3290 } 3291 rcu_gp_slow_unregister(NULL); 3292 return; 3293 } 3294 if (!cur_ops) { 3295 torture_cleanup_end(); 3296 rcu_gp_slow_unregister(NULL); 3297 return; 3298 } 3299 3300 if (cur_ops->gp_kthread_dbg) 3301 cur_ops->gp_kthread_dbg(); 3302 rcu_torture_read_exit_cleanup(); 3303 rcu_torture_barrier_cleanup(); 3304 rcu_torture_fwd_prog_cleanup(); 3305 torture_stop_kthread(rcu_torture_stall, stall_task); 3306 torture_stop_kthread(rcu_torture_writer, writer_task); 3307 3308 if (nocb_tasks) { 3309 for (i = 0; i < nrealnocbers; i++) 3310 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 3311 kfree(nocb_tasks); 3312 nocb_tasks = NULL; 3313 } 3314 3315 if (reader_tasks) { 3316 for (i = 0; i < nrealreaders; i++) 3317 torture_stop_kthread(rcu_torture_reader, 3318 reader_tasks[i]); 3319 kfree(reader_tasks); 3320 reader_tasks = NULL; 3321 } 3322 kfree(rcu_torture_reader_mbchk); 3323 rcu_torture_reader_mbchk = NULL; 3324 3325 if (fakewriter_tasks) { 3326 for (i = 0; i < nfakewriters; i++) 3327 torture_stop_kthread(rcu_torture_fakewriter, 3328 fakewriter_tasks[i]); 3329 kfree(fakewriter_tasks); 3330 fakewriter_tasks = NULL; 3331 } 3332 3333 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 3334 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 3335 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 3336 cur_ops->name, (long)gp_seq, flags, 3337 rcutorture_seq_diff(gp_seq, start_gp_seq)); 3338 torture_stop_kthread(rcu_torture_stats, stats_task); 3339 torture_stop_kthread(rcu_torture_fqs, fqs_task); 3340 if (rcu_torture_can_boost() && rcutor_hp >= 0) 3341 cpuhp_remove_state(rcutor_hp); 3342 3343 /* 3344 * Wait for all RCU callbacks to fire, then do torture-type-specific 3345 * cleanup operations. 3346 */ 3347 if (cur_ops->cb_barrier != NULL) { 3348 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 3349 cur_ops->cb_barrier(); 3350 } 3351 if (cur_ops->cleanup != NULL) 3352 cur_ops->cleanup(); 3353 3354 rcu_torture_mem_dump_obj(); 3355 3356 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 3357 3358 if (err_segs_recorded) { 3359 pr_alert("Failure/close-call rcutorture reader segments:\n"); 3360 if (rt_read_nsegs == 0) 3361 pr_alert("\t: No segments recorded!!!\n"); 3362 firsttime = 1; 3363 for (i = 0; i < rt_read_nsegs; i++) { 3364 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 3365 if (err_segs[i].rt_delay_jiffies != 0) { 3366 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 3367 err_segs[i].rt_delay_jiffies); 3368 firsttime = 0; 3369 } 3370 if (err_segs[i].rt_delay_ms != 0) { 3371 pr_cont("%s%ldms", firsttime ? "" : "+", 3372 err_segs[i].rt_delay_ms); 3373 firsttime = 0; 3374 } 3375 if (err_segs[i].rt_delay_us != 0) { 3376 pr_cont("%s%ldus", firsttime ? "" : "+", 3377 err_segs[i].rt_delay_us); 3378 firsttime = 0; 3379 } 3380 pr_cont("%s\n", 3381 err_segs[i].rt_preempted ? 
"preempted" : ""); 3382 3383 } 3384 } 3385 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 3386 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 3387 else if (torture_onoff_failures()) 3388 rcu_torture_print_module_parms(cur_ops, 3389 "End of test: RCU_HOTPLUG"); 3390 else 3391 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 3392 torture_cleanup_end(); 3393 rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay); 3394 } 3395 3396 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3397 static void rcu_torture_leak_cb(struct rcu_head *rhp) 3398 { 3399 } 3400 3401 static void rcu_torture_err_cb(struct rcu_head *rhp) 3402 { 3403 /* 3404 * This -might- happen due to race conditions, but is unlikely. 3405 * The scenario that leads to this happening is that the 3406 * first of the pair of duplicate callbacks is queued, 3407 * someone else starts a grace period that includes that 3408 * callback, then the second of the pair must wait for the 3409 * next grace period. Unlikely, but can happen. If it 3410 * does happen, the debug-objects subsystem won't have splatted. 3411 */ 3412 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 3413 } 3414 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3415 3416 /* 3417 * Verify that double-free causes debug-objects to complain, but only 3418 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 3419 * cannot be carried out. 3420 */ 3421 static void rcu_test_debug_objects(void) 3422 { 3423 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3424 struct rcu_head rh1; 3425 struct rcu_head rh2; 3426 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 3427 3428 init_rcu_head_on_stack(&rh1); 3429 init_rcu_head_on_stack(&rh2); 3430 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 3431 3432 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 3433 preempt_disable(); /* Prevent preemption from interrupting test. */ 3434 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 3435 call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 3436 local_irq_disable(); /* Make it harder to start a new grace period. */ 3437 call_rcu_hurry(&rh2, rcu_torture_leak_cb); 3438 call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 3439 if (rhp) { 3440 call_rcu_hurry(rhp, rcu_torture_leak_cb); 3441 call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 3442 } 3443 local_irq_enable(); 3444 rcu_read_unlock(); 3445 preempt_enable(); 3446 3447 /* Wait for them all to get done so we can safely return. 
*/ 3448 rcu_barrier(); 3449 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 3450 destroy_rcu_head_on_stack(&rh1); 3451 destroy_rcu_head_on_stack(&rh2); 3452 kfree(rhp); 3453 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3454 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 3455 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3456 } 3457 3458 static void rcutorture_sync(void) 3459 { 3460 static unsigned long n; 3461 3462 if (cur_ops->sync && !(++n & 0xfff)) 3463 cur_ops->sync(); 3464 } 3465 3466 static int __init 3467 rcu_torture_init(void) 3468 { 3469 long i; 3470 int cpu; 3471 int firsterr = 0; 3472 int flags = 0; 3473 unsigned long gp_seq = 0; 3474 static struct rcu_torture_ops *torture_ops[] = { 3475 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 3476 TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS 3477 &trivial_ops, 3478 }; 3479 3480 if (!torture_init_begin(torture_type, verbose)) 3481 return -EBUSY; 3482 3483 /* Process args and tell the world that the torturer is on the job. */ 3484 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 3485 cur_ops = torture_ops[i]; 3486 if (strcmp(torture_type, cur_ops->name) == 0) 3487 break; 3488 } 3489 if (i == ARRAY_SIZE(torture_ops)) { 3490 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 3491 torture_type); 3492 pr_alert("rcu-torture types:"); 3493 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 3494 pr_cont(" %s", torture_ops[i]->name); 3495 pr_cont("\n"); 3496 firsterr = -EINVAL; 3497 cur_ops = NULL; 3498 goto unwind; 3499 } 3500 if (cur_ops->fqs == NULL && fqs_duration != 0) { 3501 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 3502 fqs_duration = 0; 3503 } 3504 if (cur_ops->init) 3505 cur_ops->init(); 3506 3507 if (nreaders >= 0) { 3508 nrealreaders = nreaders; 3509 } else { 3510 nrealreaders = num_online_cpus() - 2 - nreaders; 3511 if (nrealreaders <= 0) 3512 nrealreaders = 1; 3513 } 3514 rcu_torture_print_module_parms(cur_ops, "Start of test"); 3515 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 3516 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 3517 start_gp_seq = gp_seq; 3518 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 3519 cur_ops->name, (long)gp_seq, flags); 3520 3521 /* Set up the freelist. */ 3522 3523 INIT_LIST_HEAD(&rcu_torture_freelist); 3524 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 3525 rcu_tortures[i].rtort_mbtest = 0; 3526 list_add_tail(&rcu_tortures[i].rtort_free, 3527 &rcu_torture_freelist); 3528 } 3529 3530 /* Initialize the statistics so that each run gets its own numbers. 
*/ 3531 3532 rcu_torture_current = NULL; 3533 rcu_torture_current_version = 0; 3534 atomic_set(&n_rcu_torture_alloc, 0); 3535 atomic_set(&n_rcu_torture_alloc_fail, 0); 3536 atomic_set(&n_rcu_torture_free, 0); 3537 atomic_set(&n_rcu_torture_mberror, 0); 3538 atomic_set(&n_rcu_torture_mbchk_fail, 0); 3539 atomic_set(&n_rcu_torture_mbchk_tries, 0); 3540 atomic_set(&n_rcu_torture_error, 0); 3541 n_rcu_torture_barrier_error = 0; 3542 n_rcu_torture_boost_ktrerror = 0; 3543 n_rcu_torture_boost_rterror = 0; 3544 n_rcu_torture_boost_failure = 0; 3545 n_rcu_torture_boosts = 0; 3546 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 3547 atomic_set(&rcu_torture_wcount[i], 0); 3548 for_each_possible_cpu(cpu) { 3549 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 3550 per_cpu(rcu_torture_count, cpu)[i] = 0; 3551 per_cpu(rcu_torture_batch, cpu)[i] = 0; 3552 } 3553 } 3554 err_segs_recorded = 0; 3555 rt_read_nsegs = 0; 3556 3557 /* Start up the kthreads. */ 3558 3559 rcu_torture_write_types(); 3560 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 3561 writer_task); 3562 if (torture_init_error(firsterr)) 3563 goto unwind; 3564 if (nfakewriters > 0) { 3565 fakewriter_tasks = kcalloc(nfakewriters, 3566 sizeof(fakewriter_tasks[0]), 3567 GFP_KERNEL); 3568 if (fakewriter_tasks == NULL) { 3569 TOROUT_ERRSTRING("out of memory"); 3570 firsterr = -ENOMEM; 3571 goto unwind; 3572 } 3573 } 3574 for (i = 0; i < nfakewriters; i++) { 3575 firsterr = torture_create_kthread(rcu_torture_fakewriter, 3576 NULL, fakewriter_tasks[i]); 3577 if (torture_init_error(firsterr)) 3578 goto unwind; 3579 } 3580 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 3581 GFP_KERNEL); 3582 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 3583 GFP_KERNEL); 3584 if (!reader_tasks || !rcu_torture_reader_mbchk) { 3585 TOROUT_ERRSTRING("out of memory"); 3586 firsterr = -ENOMEM; 3587 goto unwind; 3588 } 3589 for (i = 0; i < nrealreaders; i++) { 3590 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 3591 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 3592 reader_tasks[i]); 3593 if (torture_init_error(firsterr)) 3594 goto unwind; 3595 } 3596 nrealnocbers = nocbs_nthreads; 3597 if (WARN_ON(nrealnocbers < 0)) 3598 nrealnocbers = 1; 3599 if (WARN_ON(nocbs_toggle < 0)) 3600 nocbs_toggle = HZ; 3601 if (nrealnocbers > 0) { 3602 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 3603 if (nocb_tasks == NULL) { 3604 TOROUT_ERRSTRING("out of memory"); 3605 firsterr = -ENOMEM; 3606 goto unwind; 3607 } 3608 } else { 3609 nocb_tasks = NULL; 3610 } 3611 for (i = 0; i < nrealnocbers; i++) { 3612 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 3613 if (torture_init_error(firsterr)) 3614 goto unwind; 3615 } 3616 if (stat_interval > 0) { 3617 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 3618 stats_task); 3619 if (torture_init_error(firsterr)) 3620 goto unwind; 3621 } 3622 if (test_no_idle_hz && shuffle_interval > 0) { 3623 firsterr = torture_shuffle_init(shuffle_interval * HZ); 3624 if (torture_init_error(firsterr)) 3625 goto unwind; 3626 } 3627 if (stutter < 0) 3628 stutter = 0; 3629 if (stutter) { 3630 int t; 3631 3632 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 3633 firsterr = torture_stutter_init(stutter * HZ, t); 3634 if (torture_init_error(firsterr)) 3635 goto unwind; 3636 } 3637 if (fqs_duration < 0) 3638 fqs_duration = 0; 3639 if (fqs_duration) { 3640 /* Create the fqs thread */ 3641 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 3642 fqs_task); 3643 if (torture_init_error(firsterr)) 3644 goto unwind; 3645 } 3646 if (test_boost_interval < 1) 3647 test_boost_interval = 1; 3648 if (test_boost_duration < 2) 3649 test_boost_duration = 2; 3650 if (rcu_torture_can_boost()) { 3651 3652 boost_starttime = jiffies + test_boost_interval * HZ; 3653 3654 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 3655 rcutorture_booster_init, 3656 rcutorture_booster_cleanup); 3657 rcutor_hp = firsterr; 3658 if (torture_init_error(firsterr)) 3659 goto unwind; 3660 } 3661 shutdown_jiffies = jiffies + shutdown_secs * HZ; 3662 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 3663 if (torture_init_error(firsterr)) 3664 goto unwind; 3665 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 3666 rcutorture_sync); 3667 if (torture_init_error(firsterr)) 3668 goto unwind; 3669 firsterr = rcu_torture_stall_init(); 3670 if (torture_init_error(firsterr)) 3671 goto unwind; 3672 firsterr = rcu_torture_fwd_prog_init(); 3673 if (torture_init_error(firsterr)) 3674 goto unwind; 3675 firsterr = rcu_torture_barrier_init(); 3676 if (torture_init_error(firsterr)) 3677 goto unwind; 3678 firsterr = rcu_torture_read_exit_init(); 3679 if (torture_init_error(firsterr)) 3680 goto unwind; 3681 if (object_debug) 3682 rcu_test_debug_objects(); 3683 torture_init_end(); 3684 rcu_gp_slow_register(&rcu_fwd_cb_nodelay); 3685 return 0; 3686 3687 unwind: 3688 torture_init_end(); 3689 rcu_torture_cleanup(); 3690 if (shutdown_secs) { 3691 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 3692 kernel_power_off(); 3693 } 3694 return firsterr; 3695 } 3696 3697 module_init(rcu_torture_init); 3698 module_exit(rcu_torture_cleanup); 3699
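
/*
 * Illustrative usage sketch: when built as a module, rcu_torture_init()
 * runs at load time and rcu_torture_cleanup() at removal, so a test run
 * is typically started and stopped from the shell along these lines
 * (parameter names are the torture_param() definitions above; the values
 * shown are arbitrary examples, not recommendations):
 *
 *	modprobe rcutorture nreaders=8 stat_interval=30
 *	... let the test run, watch the console log ...
 *	rmmod rcutorture
 */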