// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);
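
/*
 * Example usage (illustrative only, values are arbitrary): exercising the
 * rwsem code paths with four writers and eight readers, printing
 * statistics every 15 seconds:
 *
 *	modprobe locktorture torture_type=rwsem_lock nwriters_stress=4 \
 *		nreaders_stress=8 stat_interval=15
 */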
/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}
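
/*
 * The "lock_busted" ops above intentionally provide no mutual exclusion
 * at all: writelock() and writeunlock() are no-ops.  Running with
 * torture_type=lock_busted should therefore trip the writer kthreads'
 * duplicate-acquisition checks, which is a handy way of verifying that
 * this test's failure detection actually works.
 */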
static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another 10 * rt_boost_factor
		 * operations, then restored back to its original prio, and so
		 * forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_lock_busted_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * longdelay_ms))) {
		j = jiffies;
		mdelay(longdelay_ms);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}
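
/*
 * Note that on PREEMPT_RT kernels, spinlock_t is a sleeping lock while
 * raw_spinlock_t remains a true busy-waiting spinlock, which is why the
 * default torture_type above is "raw_spin_lock" when CONFIG_PREEMPT_RT
 * is enabled: the raw variant exercises the same underlying primitive
 * on both kernel flavors.
 */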
static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock = torture_raw_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock = torture_raw_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_raw_spin_lock_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "raw_spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}
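
/*
 * Note that the read-side short delay below (10us) is longer than the
 * write-side one (2us): longer read-side critical sections make it more
 * likely that multiple readers overlap, which is the interesting case
 * when torturing a reader-writer lock.
 */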
static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock = torture_rwlock_write_lock,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwlock_write_unlock,
	.readlock = torture_rwlock_read_lock,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock,
	.name = "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}
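
/*
 * Worked example of the lockset bitmask: with nested_locks=4 and a
 * random lockset of 0b1010, torture_mutex_nested_lock() above acquires
 * torture_nested_mutexes[1] and then [3], and torture_mutex_nested_unlock()
 * below releases [3] and then [1].  Always locking in ascending index
 * order and unlocking in descending order keeps the lock ordering
 * consistent across threads, so lockdep sees no inversions.
 */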
static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init = torture_mutex_init,
	.nested_lock = torture_mutex_nested_lock,
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_mutex_unlock,
	.nested_unlock = torture_mutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems.  The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}
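
/*
 * torture_ww_mutex_lock() above follows the standard wound/wait backoff
 * protocol: when ww_mutex_lock() returns -EDEADLK, every lock acquired
 * so far is dropped, the contended lock is taken with
 * ww_mutex_lock_slow() (which blocks until it succeeds), and that lock
 * is moved to the front of the list so that the remaining locks are
 * retried after it.
 */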
static struct lock_torture_ops ww_mutex_lock_ops = {
	.init = torture_ww_mutex_init,
	.exit = torture_ww_mutex_exit,
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init = torture_rtmutex_init,
	.nested_lock = torture_rtmutex_nested_lock,
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rt_boost_rtmutex,
	.writeunlock = torture_rtmutex_unlock,
	.nested_unlock = torture_rtmutex_nested_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
#endif
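
/*
 * Boosting policy recap: torture_rt_boost() (used by most lock types)
 * boosts only when rt_boost=2, while torture_rt_boost_rtmutex() boosts
 * whenever rt_boost is nonzero.  The default rt_boost=2 thus exercises
 * priority boosting for every lock type, while rt_boost=1 restricts it
 * to rtmutex, the one type with a priority-inheritance protocol.
 */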
static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.exit = torture_percpu_rwsem_exit,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_rt_boost,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};
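
/*
 * A minimal sketch of what plugging in a new lock type would look like
 * (hypothetical, "foo" is not a type this file implements): define
 * lock/unlock wrappers and an ops structure, then add the ops to
 * torture_ops[] in lock_torture_init() below.  Exclusive-only locks
 * leave the read-side hooks NULL; .init/.exit and the nested hooks are
 * likewise optional.
 *
 *	static int torture_foo_write_lock(int tid __maybe_unused)
 *	{
 *		foo_lock(&torture_foo);
 *		return 0;
 *	}
 *
 *	static struct lock_torture_ops foo_lock_ops = {
 *		.writelock = torture_foo_write_lock,
 *		.write_delay = torture_spin_lock_write_delay,
 *		.task_boost = torture_rt_boost,
 *		.writeunlock = torture_foo_write_unlock,
 *		.name = "foo_lock"
 *	};
 */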
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	int tid = lwsp - cxt.lwsa;
	DEFINE_TORTURE_RANDOM(rand);
	u32 lockset_mask;
	bool skip_main_lock;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock.  By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */

			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
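
/*
 * The resulting line looks like this (numbers are illustrative):
 *
 *	Writes: Total: 94721 Max/Min: 9161/8734 Fail: 0
 *
 * "???" is appended when the max/min imbalance exceeds 2:1 while CPU
 * hotplug is disabled, and "!!!" when any acquisition failure was seen.
 */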
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d nested_locks=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress,
		 nested_locks, stat_interval, verbose, shuffle_interval,
		 stutter, shutdown_secs, onoff_interval, onoff_holdoff);
}
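
/*
 * Cleanup ordering below matters: the writer and reader kthreads are
 * stopped before the stats kthread, and the final statistics are
 * printed only after the stats kthread is gone, because
 * lock_torture_stats_print() must never run concurrently with itself.
 */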
static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt.cur_ops->init() may have been invoked, so in
	 * addition to performing the underlying torture-specific cleanups,
	 * cur_ops->exit() will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}
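
	/*
	 * Worked example of the thread-count defaults: with both
	 * nwriters_stress and nreaders_stress left at -1 on an 8-CPU
	 * system, a writer-only type such as spin_lock gets 16 writers,
	 * while a reader-capable type such as rwsem_lock is rebalanced
	 * below to 8 writers plus 8 readers.
	 */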
	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers.  We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* Cap nested_locks to MAX_NESTED_LOCKS. */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
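
	/*
	 * Note that nested_locks only has an effect for torture types
	 * that supply .nested_lock/.nested_unlock hooks, currently
	 * mutex_lock and rtmutex_lock; other types take only the main
	 * lock regardless of this parameter.
	 */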
	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first.  This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);