// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_state = get_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = rcu_can_boost(),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should do this manually by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1; in
		 * that case the boost check would never happen in the above
		 * loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
"); 1452 atomic_inc(&n_rcu_torture_error); 1453 WARN_ON_ONCE(1); 1454 } 1455 pr_cont("Reader Pipe: "); 1456 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1457 pr_cont(" %ld", pipesummary[i]); 1458 pr_cont("\n"); 1459 1460 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1461 pr_cont("Reader Batch: "); 1462 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1463 pr_cont(" %ld", batchsummary[i]); 1464 pr_cont("\n"); 1465 1466 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1467 pr_cont("Free-Block Circulation: "); 1468 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1469 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 1470 } 1471 pr_cont("\n"); 1472 1473 if (cur_ops->stats) 1474 cur_ops->stats(); 1475 if (rtcv_snap == rcu_torture_current_version && 1476 rcu_torture_current != NULL) { 1477 int __maybe_unused flags = 0; 1478 unsigned long __maybe_unused gp_seq = 0; 1479 1480 rcutorture_get_gp_data(cur_ops->ttype, 1481 &flags, &gp_seq); 1482 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1483 &flags, &gp_seq); 1484 wtp = READ_ONCE(writer_task); 1485 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", 1486 rcu_torture_writer_state_getname(), 1487 rcu_torture_writer_state, gp_seq, flags, 1488 wtp == NULL ? ~0UL : wtp->state, 1489 wtp == NULL ? -1 : (int)task_cpu(wtp)); 1490 if (!splatted && wtp) { 1491 sched_show_task(wtp); 1492 splatted = true; 1493 } 1494 show_rcu_gp_kthreads(); 1495 rcu_ftrace_dump(DUMP_ALL); 1496 } 1497 rtcv_snap = rcu_torture_current_version; 1498 } 1499 1500 /* 1501 * Periodically prints torture statistics, if periodic statistics printing 1502 * was specified via the stat_interval module parameter. 1503 */ 1504 static int 1505 rcu_torture_stats(void *arg) 1506 { 1507 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 1508 do { 1509 schedule_timeout_interruptible(stat_interval * HZ); 1510 rcu_torture_stats_print(); 1511 torture_shutdown_absorb("rcu_torture_stats"); 1512 } while (!torture_must_stop()); 1513 torture_kthread_stopping("rcu_torture_stats"); 1514 return 0; 1515 } 1516 1517 static void 1518 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1519 { 1520 pr_alert("%s" TORTURE_FLAG 1521 "--- %s: nreaders=%d nfakewriters=%d " 1522 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1523 "shuffle_interval=%d stutter=%d irqreader=%d " 1524 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1525 "test_boost=%d/%d test_boost_interval=%d " 1526 "test_boost_duration=%d shutdown_secs=%d " 1527 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 1528 "n_barrier_cbs=%d " 1529 "onoff_interval=%d onoff_holdoff=%d\n", 1530 torture_type, tag, nrealreaders, nfakewriters, 1531 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1532 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1533 test_boost, cur_ops->can_boost, 1534 test_boost_interval, test_boost_duration, shutdown_secs, 1535 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 1536 n_barrier_cbs, 1537 onoff_interval, onoff_holdoff); 1538 } 1539 1540 static int rcutorture_booster_cleanup(unsigned int cpu) 1541 { 1542 struct task_struct *t; 1543 1544 if (boost_tasks[cpu] == NULL) 1545 return 0; 1546 mutex_lock(&boost_mutex); 1547 t = boost_tasks[cpu]; 1548 boost_tasks[cpu] = NULL; 1549 rcu_torture_enable_rt_throttle(); 1550 mutex_unlock(&boost_mutex); 1551 1552 /* This must be outside of the mutex, otherwise deadlock! 
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
static unsigned long rcu_launder_gp_seq_start;

static void rcu_torture_fwd_cb_hist(void)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	gps_old = rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i].n_launders++;
	n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}

// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			schedule();
	} else {
		// No userspace emulation: CB invocation throttles call_rcu().
		cond_resched();
	}
}

/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
1741 */ 1742 static unsigned long rcu_torture_fwd_prog_cbfree(void) 1743 { 1744 unsigned long flags; 1745 unsigned long freed = 0; 1746 struct rcu_fwd_cb *rfcp; 1747 1748 for (;;) { 1749 spin_lock_irqsave(&rcu_fwd_lock, flags); 1750 rfcp = rcu_fwd_cb_head; 1751 if (!rfcp) { 1752 spin_unlock_irqrestore(&rcu_fwd_lock, flags); 1753 break; 1754 } 1755 rcu_fwd_cb_head = rfcp->rfc_next; 1756 if (!rcu_fwd_cb_head) 1757 rcu_fwd_cb_tail = &rcu_fwd_cb_head; 1758 spin_unlock_irqrestore(&rcu_fwd_lock, flags); 1759 kfree(rfcp); 1760 freed++; 1761 rcu_torture_fwd_prog_cond_resched(freed); 1762 } 1763 return freed; 1764 } 1765 1766 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 1767 static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries) 1768 { 1769 unsigned long cver; 1770 unsigned long dur; 1771 struct fwd_cb_state fcs; 1772 unsigned long gps; 1773 int idx; 1774 int sd; 1775 int sd4; 1776 bool selfpropcb = false; 1777 unsigned long stopat; 1778 static DEFINE_TORTURE_RANDOM(trs); 1779 1780 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { 1781 init_rcu_head_on_stack(&fcs.rh); 1782 selfpropcb = true; 1783 } 1784 1785 /* Tight loop containing cond_resched(). */ 1786 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1787 cur_ops->sync(); /* Later readers see above write. */ 1788 if (selfpropcb) { 1789 WRITE_ONCE(fcs.stop, 0); 1790 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 1791 } 1792 cver = READ_ONCE(rcu_torture_current_version); 1793 gps = cur_ops->get_gp_seq(); 1794 sd = cur_ops->stall_dur() + 1; 1795 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 1796 dur = sd4 + torture_random(&trs) % (sd - sd4); 1797 WRITE_ONCE(rcu_fwd_startat, jiffies); 1798 stopat = rcu_fwd_startat + dur; 1799 while (time_before(jiffies, stopat) && 1800 !shutdown_time_arrived() && 1801 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1802 idx = cur_ops->readlock(); 1803 udelay(10); 1804 cur_ops->readunlock(idx); 1805 if (!fwd_progress_need_resched || need_resched()) 1806 rcu_torture_fwd_prog_cond_resched(1); 1807 } 1808 (*tested_tries)++; 1809 if (!time_before(jiffies, stopat) && 1810 !shutdown_time_arrived() && 1811 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1812 (*tested)++; 1813 cver = READ_ONCE(rcu_torture_current_version) - cver; 1814 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1815 WARN_ON(!cver && gps < 2); 1816 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 1817 } 1818 if (selfpropcb) { 1819 WRITE_ONCE(fcs.stop, 1); 1820 cur_ops->sync(); /* Wait for running CB to complete. */ 1821 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 1822 } 1823 1824 if (selfpropcb) { 1825 WARN_ON(READ_ONCE(fcs.stop) != 2); 1826 destroy_rcu_head_on_stack(&fcs.rh); 1827 } 1828 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 1829 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1830 } 1831 1832 /* Carry out call_rcu() forward-progress testing. */ 1833 static void rcu_torture_fwd_prog_cr(void) 1834 { 1835 unsigned long cver; 1836 unsigned long gps; 1837 int i; 1838 long n_launders; 1839 long n_launders_cb_snap; 1840 long n_launders_sa; 1841 long n_max_cbs; 1842 long n_max_gps; 1843 struct rcu_fwd_cb *rfcp; 1844 struct rcu_fwd_cb *rfcpn; 1845 unsigned long stopat; 1846 unsigned long stoppedat; 1847 1848 if (READ_ONCE(rcu_fwd_emergency_stop)) 1849 return; /* Get out of the way quickly, no GP wait! */ 1850 if (!cur_ops->call) 1851 return; /* Can't do call_rcu() fwd prog without ->call. 
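 * (The need_resched()/cond_resched() test in rcu_torture_fwd_prog_nr()
 * still runs for such flavors; only this callback-flood test is skipped.)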
*/ 1852 1853 /* Loop continuously posting RCU callbacks. */ 1854 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1855 cur_ops->sync(); /* Later readers see above write. */ 1856 WRITE_ONCE(rcu_fwd_startat, jiffies); 1857 stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 1858 n_launders = 0; 1859 n_launders_cb = 0; 1860 n_launders_sa = 0; 1861 n_max_cbs = 0; 1862 n_max_gps = 0; 1863 for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++) 1864 n_launders_hist[i].n_launders = 0; 1865 cver = READ_ONCE(rcu_torture_current_version); 1866 gps = cur_ops->get_gp_seq(); 1867 rcu_launder_gp_seq_start = gps; 1868 while (time_before(jiffies, stopat) && 1869 !shutdown_time_arrived() && 1870 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1871 rfcp = READ_ONCE(rcu_fwd_cb_head); 1872 rfcpn = NULL; 1873 if (rfcp) 1874 rfcpn = READ_ONCE(rfcp->rfc_next); 1875 if (rfcpn) { 1876 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 1877 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 1878 break; 1879 rcu_fwd_cb_head = rfcpn; 1880 n_launders++; 1881 n_launders_sa++; 1882 } else { 1883 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 1884 if (WARN_ON_ONCE(!rfcp)) { 1885 schedule_timeout_interruptible(1); 1886 continue; 1887 } 1888 n_max_cbs++; 1889 n_launders_sa = 0; 1890 rfcp->rfc_gps = 0; 1891 } 1892 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 1893 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 1894 } 1895 stoppedat = jiffies; 1896 n_launders_cb_snap = READ_ONCE(n_launders_cb); 1897 cver = READ_ONCE(rcu_torture_current_version) - cver; 1898 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1899 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 1900 (void)rcu_torture_fwd_prog_cbfree(); 1901 1902 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 1903 !shutdown_time_arrived()) { 1904 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 1905 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 1906 __func__, 1907 stoppedat - rcu_fwd_startat, jiffies - stoppedat, 1908 n_launders + n_max_cbs - n_launders_cb_snap, 1909 n_launders, n_launders_sa, 1910 n_max_gps, n_max_cbs, cver, gps); 1911 rcu_torture_fwd_cb_hist(); 1912 } 1913 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 1914 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1915 } 1916 1917 1918 /* 1919 * OOM notifier, but this only prints diagnostic information for the 1920 * current forward-progress test. 1921 */ 1922 static int rcutorture_oom_notify(struct notifier_block *self, 1923 unsigned long notused, void *nfreed) 1924 { 1925 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 1926 __func__); 1927 rcu_torture_fwd_cb_hist(); 1928 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2); 1929 WRITE_ONCE(rcu_fwd_emergency_stop, true); 1930 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 1931 pr_info("%s: Freed %lu RCU callbacks.\n", 1932 __func__, rcu_torture_fwd_prog_cbfree()); 1933 rcu_barrier(); 1934 pr_info("%s: Freed %lu RCU callbacks.\n", 1935 __func__, rcu_torture_fwd_prog_cbfree()); 1936 rcu_barrier(); 1937 pr_info("%s: Freed %lu RCU callbacks.\n", 1938 __func__, rcu_torture_fwd_prog_cbfree()); 1939 smp_mb(); /* Frees before return to avoid redoing OOM. */ 1940 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! 
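 * Incrementing *nfreed tells the OOM-notifier machinery that this
 * notifier released memory, which can spare the system an OOM kill
 * for this episode.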
*/ 1941 pr_info("%s returning after OOM processing.\n", __func__); 1942 return NOTIFY_OK; 1943 } 1944 1945 static struct notifier_block rcutorture_oom_nb = { 1946 .notifier_call = rcutorture_oom_notify 1947 }; 1948 1949 /* Carry out grace-period forward-progress testing. */ 1950 static int rcu_torture_fwd_prog(void *args) 1951 { 1952 int tested = 0; 1953 int tested_tries = 0; 1954 1955 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 1956 rcu_bind_current_to_nocb(); 1957 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 1958 set_user_nice(current, MAX_NICE); 1959 do { 1960 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 1961 WRITE_ONCE(rcu_fwd_emergency_stop, false); 1962 register_oom_notifier(&rcutorture_oom_nb); 1963 rcu_torture_fwd_prog_nr(&tested, &tested_tries); 1964 rcu_torture_fwd_prog_cr(); 1965 unregister_oom_notifier(&rcutorture_oom_nb); 1966 1967 /* Avoid slow periods, better to test when busy. */ 1968 stutter_wait("rcu_torture_fwd_prog"); 1969 } while (!torture_must_stop()); 1970 /* Short runs might not contain a valid forward-progress attempt. */ 1971 WARN_ON(!tested && tested_tries >= 5); 1972 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 1973 torture_kthread_stopping("rcu_torture_fwd_prog"); 1974 return 0; 1975 } 1976 1977 /* If forward-progress checking is requested and feasible, spawn the thread. */ 1978 static int __init rcu_torture_fwd_prog_init(void) 1979 { 1980 if (!fwd_progress) 1981 return 0; /* Not requested, so don't do it. */ 1982 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || 1983 cur_ops == &rcu_busted_ops) { 1984 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 1985 return 0; 1986 } 1987 if (stall_cpu > 0) { 1988 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 1989 if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS)) 1990 return -EINVAL; /* In module, can fail back to user. */ 1991 WARN_ON(1); /* Make sure rcutorture notices conflict. */ 1992 return 0; 1993 } 1994 if (fwd_progress_holdoff <= 0) 1995 fwd_progress_holdoff = 1; 1996 if (fwd_progress_div <= 0) 1997 fwd_progress_div = 4; 1998 return torture_create_kthread(rcu_torture_fwd_prog, 1999 NULL, fwd_prog_task); 2000 } 2001 2002 /* Callback function for RCU barrier testing. */ 2003 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2004 { 2005 atomic_inc(&barrier_cbs_invoked); 2006 } 2007 2008 /* kthread function to register callbacks used to test RCU barriers. */ 2009 static int rcu_torture_barrier_cbs(void *arg) 2010 { 2011 long myid = (long)arg; 2012 bool lastphase = 0; 2013 bool newphase; 2014 struct rcu_head rcu; 2015 2016 init_rcu_head_on_stack(&rcu); 2017 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2018 set_user_nice(current, MAX_NICE); 2019 do { 2020 wait_event(barrier_cbs_wq[myid], 2021 (newphase = 2022 smp_load_acquire(&barrier_phase)) != lastphase || 2023 torture_must_stop()); 2024 lastphase = newphase; 2025 if (torture_must_stop()) 2026 break; 2027 /* 2028 * The above smp_load_acquire() ensures barrier_phase load 2029 * is ordered before the following ->call(). 2030 */ 2031 local_irq_disable(); /* Just to test no-irq call_rcu(). 
*/ 2032 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2033 local_irq_enable(); 2034 if (atomic_dec_and_test(&barrier_cbs_count)) 2035 wake_up(&barrier_wq); 2036 } while (!torture_must_stop()); 2037 if (cur_ops->cb_barrier != NULL) 2038 cur_ops->cb_barrier(); 2039 destroy_rcu_head_on_stack(&rcu); 2040 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2041 return 0; 2042 } 2043 2044 /* kthread function to drive and coordinate RCU barrier testing. */ 2045 static int rcu_torture_barrier(void *arg) 2046 { 2047 int i; 2048 2049 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2050 do { 2051 atomic_set(&barrier_cbs_invoked, 0); 2052 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2053 /* Ensure barrier_phase ordered after prior assignments. */ 2054 smp_store_release(&barrier_phase, !barrier_phase); 2055 for (i = 0; i < n_barrier_cbs; i++) 2056 wake_up(&barrier_cbs_wq[i]); 2057 wait_event(barrier_wq, 2058 atomic_read(&barrier_cbs_count) == 0 || 2059 torture_must_stop()); 2060 if (torture_must_stop()) 2061 break; 2062 n_barrier_attempts++; 2063 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2064 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2065 n_rcu_torture_barrier_error++; 2066 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2067 atomic_read(&barrier_cbs_invoked), 2068 n_barrier_cbs); 2069 WARN_ON_ONCE(1); 2070 } else { 2071 n_barrier_successes++; 2072 } 2073 schedule_timeout_interruptible(HZ / 10); 2074 } while (!torture_must_stop()); 2075 torture_kthread_stopping("rcu_torture_barrier"); 2076 return 0; 2077 } 2078 2079 /* Initialize RCU barrier testing. */ 2080 static int rcu_torture_barrier_init(void) 2081 { 2082 int i; 2083 int ret; 2084 2085 if (n_barrier_cbs <= 0) 2086 return 0; 2087 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2088 pr_alert("%s" TORTURE_FLAG 2089 " Call or barrier ops missing for %s,\n", 2090 torture_type, cur_ops->name); 2091 pr_alert("%s" TORTURE_FLAG 2092 " RCU barrier testing omitted from run.\n", 2093 torture_type); 2094 return 0; 2095 } 2096 atomic_set(&barrier_cbs_count, 0); 2097 atomic_set(&barrier_cbs_invoked, 0); 2098 barrier_cbs_tasks = 2099 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2100 GFP_KERNEL); 2101 barrier_cbs_wq = 2102 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2103 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2104 return -ENOMEM; 2105 for (i = 0; i < n_barrier_cbs; i++) { 2106 init_waitqueue_head(&barrier_cbs_wq[i]); 2107 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2108 (void *)(long)i, 2109 barrier_cbs_tasks[i]); 2110 if (ret) 2111 return ret; 2112 } 2113 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2114 } 2115 2116 /* Clean up after RCU barrier testing. 
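 * Stops the coordinating rcu_torture_barrier() kthread first, then each
 * of the rcu_torture_barrier_cbs() callback-posting kthreads, and finally
 * frees the task-pointer and waitqueue arrays.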
*/ 2117 static void rcu_torture_barrier_cleanup(void) 2118 { 2119 int i; 2120 2121 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2122 if (barrier_cbs_tasks != NULL) { 2123 for (i = 0; i < n_barrier_cbs; i++) 2124 torture_stop_kthread(rcu_torture_barrier_cbs, 2125 barrier_cbs_tasks[i]); 2126 kfree(barrier_cbs_tasks); 2127 barrier_cbs_tasks = NULL; 2128 } 2129 if (barrier_cbs_wq != NULL) { 2130 kfree(barrier_cbs_wq); 2131 barrier_cbs_wq = NULL; 2132 } 2133 } 2134 2135 static bool rcu_torture_can_boost(void) 2136 { 2137 static int boost_warn_once; 2138 int prio; 2139 2140 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2141 return false; 2142 2143 prio = rcu_get_gp_kthreads_prio(); 2144 if (!prio) 2145 return false; 2146 2147 if (prio < 2) { 2148 if (boost_warn_once == 1) 2149 return false; 2150 2151 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2152 boost_warn_once = 1; 2153 return false; 2154 } 2155 2156 return true; 2157 } 2158 2159 static enum cpuhp_state rcutor_hp; 2160 2161 static void 2162 rcu_torture_cleanup(void) 2163 { 2164 int firsttime; 2165 int flags = 0; 2166 unsigned long gp_seq = 0; 2167 int i; 2168 2169 if (torture_cleanup_begin()) { 2170 if (cur_ops->cb_barrier != NULL) 2171 cur_ops->cb_barrier(); 2172 return; 2173 } 2174 if (!cur_ops) { 2175 torture_cleanup_end(); 2176 return; 2177 } 2178 2179 show_rcu_gp_kthreads(); 2180 rcu_torture_barrier_cleanup(); 2181 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2182 torture_stop_kthread(rcu_torture_stall, stall_task); 2183 torture_stop_kthread(rcu_torture_writer, writer_task); 2184 2185 if (reader_tasks) { 2186 for (i = 0; i < nrealreaders; i++) 2187 torture_stop_kthread(rcu_torture_reader, 2188 reader_tasks[i]); 2189 kfree(reader_tasks); 2190 } 2191 rcu_torture_current = NULL; 2192 2193 if (fakewriter_tasks) { 2194 for (i = 0; i < nfakewriters; i++) { 2195 torture_stop_kthread(rcu_torture_fakewriter, 2196 fakewriter_tasks[i]); 2197 } 2198 kfree(fakewriter_tasks); 2199 fakewriter_tasks = NULL; 2200 } 2201 2202 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2203 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2204 pr_alert("%s: End-test grace-period state: g%lu f%#x\n", 2205 cur_ops->name, gp_seq, flags); 2206 torture_stop_kthread(rcu_torture_stats, stats_task); 2207 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2208 if (rcu_torture_can_boost()) 2209 cpuhp_remove_state(rcutor_hp); 2210 2211 /* 2212 * Wait for all RCU callbacks to fire, then do torture-type-specific 2213 * cleanup operations. 2214 */ 2215 if (cur_ops->cb_barrier != NULL) 2216 cur_ops->cb_barrier(); 2217 if (cur_ops->cleanup != NULL) 2218 cur_ops->cleanup(); 2219 2220 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2221 2222 if (err_segs_recorded) { 2223 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2224 if (rt_read_nsegs == 0) 2225 pr_alert("\t: No segments recorded!!!\n"); 2226 firsttime = 1; 2227 for (i = 0; i < rt_read_nsegs; i++) { 2228 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2229 if (err_segs[i].rt_delay_jiffies != 0) { 2230 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2231 err_segs[i].rt_delay_jiffies); 2232 firsttime = 0; 2233 } 2234 if (err_segs[i].rt_delay_ms != 0) { 2235 pr_cont("%s%ldms", firsttime ? 
"" : "+", 2236 err_segs[i].rt_delay_ms); 2237 firsttime = 0; 2238 } 2239 if (err_segs[i].rt_delay_us != 0) { 2240 pr_cont("%s%ldus", firsttime ? "" : "+", 2241 err_segs[i].rt_delay_us); 2242 firsttime = 0; 2243 } 2244 pr_cont("%s\n", 2245 err_segs[i].rt_preempted ? "preempted" : ""); 2246 2247 } 2248 } 2249 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2250 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2251 else if (torture_onoff_failures()) 2252 rcu_torture_print_module_parms(cur_ops, 2253 "End of test: RCU_HOTPLUG"); 2254 else 2255 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2256 torture_cleanup_end(); 2257 } 2258 2259 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2260 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2261 { 2262 } 2263 2264 static void rcu_torture_err_cb(struct rcu_head *rhp) 2265 { 2266 /* 2267 * This -might- happen due to race conditions, but is unlikely. 2268 * The scenario that leads to this happening is that the 2269 * first of the pair of duplicate callbacks is queued, 2270 * someone else starts a grace period that includes that 2271 * callback, then the second of the pair must wait for the 2272 * next grace period. Unlikely, but can happen. If it 2273 * does happen, the debug-objects subsystem won't have splatted. 2274 */ 2275 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2276 } 2277 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2278 2279 /* 2280 * Verify that double-free causes debug-objects to complain, but only 2281 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2282 * cannot be carried out. 2283 */ 2284 static void rcu_test_debug_objects(void) 2285 { 2286 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2287 struct rcu_head rh1; 2288 struct rcu_head rh2; 2289 2290 init_rcu_head_on_stack(&rh1); 2291 init_rcu_head_on_stack(&rh2); 2292 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2293 2294 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2295 preempt_disable(); /* Prevent preemption from interrupting test. */ 2296 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2297 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2298 local_irq_disable(); /* Make it harder to start a new grace period. */ 2299 call_rcu(&rh2, rcu_torture_leak_cb); 2300 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2301 local_irq_enable(); 2302 rcu_read_unlock(); 2303 preempt_enable(); 2304 2305 /* Wait for them all to get done so we can safely return. 
*/ 2306 rcu_barrier(); 2307 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2308 destroy_rcu_head_on_stack(&rh1); 2309 destroy_rcu_head_on_stack(&rh2); 2310 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2311 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2312 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2313 } 2314 2315 static void rcutorture_sync(void) 2316 { 2317 static unsigned long n; 2318 2319 if (cur_ops->sync && !(++n & 0xfff)) 2320 cur_ops->sync(); 2321 } 2322 2323 static int __init 2324 rcu_torture_init(void) 2325 { 2326 long i; 2327 int cpu; 2328 int firsterr = 0; 2329 static struct rcu_torture_ops *torture_ops[] = { 2330 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2331 &busted_srcud_ops, &tasks_ops, &trivial_ops, 2332 }; 2333 2334 if (!torture_init_begin(torture_type, verbose)) 2335 return -EBUSY; 2336 2337 /* Process args and tell the world that the torturer is on the job. */ 2338 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2339 cur_ops = torture_ops[i]; 2340 if (strcmp(torture_type, cur_ops->name) == 0) 2341 break; 2342 } 2343 if (i == ARRAY_SIZE(torture_ops)) { 2344 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2345 torture_type); 2346 pr_alert("rcu-torture types:"); 2347 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2348 pr_cont(" %s", torture_ops[i]->name); 2349 pr_cont("\n"); 2350 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 2351 firsterr = -EINVAL; 2352 cur_ops = NULL; 2353 goto unwind; 2354 } 2355 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2356 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2357 fqs_duration = 0; 2358 } 2359 if (cur_ops->init) 2360 cur_ops->init(); 2361 2362 if (nreaders >= 0) { 2363 nrealreaders = nreaders; 2364 } else { 2365 nrealreaders = num_online_cpus() - 2 - nreaders; 2366 if (nrealreaders <= 0) 2367 nrealreaders = 1; 2368 } 2369 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2370 2371 /* Set up the freelist. */ 2372 2373 INIT_LIST_HEAD(&rcu_torture_freelist); 2374 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2375 rcu_tortures[i].rtort_mbtest = 0; 2376 list_add_tail(&rcu_tortures[i].rtort_free, 2377 &rcu_torture_freelist); 2378 } 2379 2380 /* Initialize the statistics so that each run gets its own numbers. */ 2381 2382 rcu_torture_current = NULL; 2383 rcu_torture_current_version = 0; 2384 atomic_set(&n_rcu_torture_alloc, 0); 2385 atomic_set(&n_rcu_torture_alloc_fail, 0); 2386 atomic_set(&n_rcu_torture_free, 0); 2387 atomic_set(&n_rcu_torture_mberror, 0); 2388 atomic_set(&n_rcu_torture_error, 0); 2389 n_rcu_torture_barrier_error = 0; 2390 n_rcu_torture_boost_ktrerror = 0; 2391 n_rcu_torture_boost_rterror = 0; 2392 n_rcu_torture_boost_failure = 0; 2393 n_rcu_torture_boosts = 0; 2394 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2395 atomic_set(&rcu_torture_wcount[i], 0); 2396 for_each_possible_cpu(cpu) { 2397 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2398 per_cpu(rcu_torture_count, cpu)[i] = 0; 2399 per_cpu(rcu_torture_batch, cpu)[i] = 0; 2400 } 2401 } 2402 err_segs_recorded = 0; 2403 rt_read_nsegs = 0; 2404 2405 /* Start up the kthreads. 
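 * Each failure below records its error code in firsterr and branches to
 * the unwind label, which tears down everything created up to that point.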
*/ 2406 2407 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 2408 writer_task); 2409 if (firsterr) 2410 goto unwind; 2411 if (nfakewriters > 0) { 2412 fakewriter_tasks = kcalloc(nfakewriters, 2413 sizeof(fakewriter_tasks[0]), 2414 GFP_KERNEL); 2415 if (fakewriter_tasks == NULL) { 2416 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2417 firsterr = -ENOMEM; 2418 goto unwind; 2419 } 2420 } 2421 for (i = 0; i < nfakewriters; i++) { 2422 firsterr = torture_create_kthread(rcu_torture_fakewriter, 2423 NULL, fakewriter_tasks[i]); 2424 if (firsterr) 2425 goto unwind; 2426 } 2427 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 2428 GFP_KERNEL); 2429 if (reader_tasks == NULL) { 2430 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2431 firsterr = -ENOMEM; 2432 goto unwind; 2433 } 2434 for (i = 0; i < nrealreaders; i++) { 2435 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 2436 reader_tasks[i]); 2437 if (firsterr) 2438 goto unwind; 2439 } 2440 if (stat_interval > 0) { 2441 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 2442 stats_task); 2443 if (firsterr) 2444 goto unwind; 2445 } 2446 if (test_no_idle_hz && shuffle_interval > 0) { 2447 firsterr = torture_shuffle_init(shuffle_interval * HZ); 2448 if (firsterr) 2449 goto unwind; 2450 } 2451 if (stutter < 0) 2452 stutter = 0; 2453 if (stutter) { 2454 int t; 2455 2456 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; 2457 firsterr = torture_stutter_init(stutter * HZ, t); 2458 if (firsterr) 2459 goto unwind; 2460 } 2461 if (fqs_duration < 0) 2462 fqs_duration = 0; 2463 if (fqs_duration) { 2464 /* Create the fqs thread */ 2465 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 2466 fqs_task); 2467 if (firsterr) 2468 goto unwind; 2469 } 2470 if (test_boost_interval < 1) 2471 test_boost_interval = 1; 2472 if (test_boost_duration < 2) 2473 test_boost_duration = 2; 2474 if (rcu_torture_can_boost()) { 2475 2476 boost_starttime = jiffies + test_boost_interval * HZ; 2477 2478 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 2479 rcutorture_booster_init, 2480 rcutorture_booster_cleanup); 2481 if (firsterr < 0) 2482 goto unwind; 2483 rcutor_hp = firsterr; 2484 } 2485 shutdown_jiffies = jiffies + shutdown_secs * HZ; 2486 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2487 if (firsterr) 2488 goto unwind; 2489 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 2490 rcutorture_sync); 2491 if (firsterr) 2492 goto unwind; 2493 firsterr = rcu_torture_stall_init(); 2494 if (firsterr) 2495 goto unwind; 2496 firsterr = rcu_torture_fwd_prog_init(); 2497 if (firsterr) 2498 goto unwind; 2499 firsterr = rcu_torture_barrier_init(); 2500 if (firsterr) 2501 goto unwind; 2502 if (object_debug) 2503 rcu_test_debug_objects(); 2504 torture_init_end(); 2505 return 0; 2506 2507 unwind: 2508 torture_init_end(); 2509 rcu_torture_cleanup(); 2510 return firsterr; 2511 } 2512 2513 module_init(rcu_torture_init); 2514 module_exit(rcu_torture_cleanup); 2515
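/*
 * Example usage (illustrative only):  on a kernel built with
 * CONFIG_RCU_TORTURE_TEST=m, a forward-progress-heavy run might be started
 * with something like
 *
 *	modprobe rcutorture torture_type=rcu fwd_progress=1 fwd_progress_holdoff=60
 *
 * and later stopped with "rmmod rcutorture", at which point
 * rcu_torture_cleanup() prints the "End of test" summary.  For built-in
 * configurations, the same parameters can instead be supplied on the kernel
 * command line with the "rcutorture." prefix, for example
 * "rcutorture.stall_cpu=30 rcutorture.stall_cpu_holdoff=60".
 */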