1 /* 2 * Module-based torture test facility for locking 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright (C) IBM Corporation, 2014 19 * 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 21 * Davidlohr Bueso <dave@stgolabs.net> 22 * Based on kernel/rcu/torture.c. 23 */ 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/kthread.h> 27 #include <linux/sched/rt.h> 28 #include <linux/spinlock.h> 29 #include <linux/rwlock.h> 30 #include <linux/mutex.h> 31 #include <linux/rwsem.h> 32 #include <linux/smp.h> 33 #include <linux/interrupt.h> 34 #include <linux/sched.h> 35 #include <uapi/linux/sched/types.h> 36 #include <linux/atomic.h> 37 #include <linux/moduleparam.h> 38 #include <linux/delay.h> 39 #include <linux/slab.h> 40 #include <linux/percpu-rwsem.h> 41 #include <linux/torture.h> 42 43 MODULE_LICENSE("GPL"); 44 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 45 46 torture_param(int, nwriters_stress, -1, 47 "Number of write-locking stress-test threads"); 48 torture_param(int, nreaders_stress, -1, 49 "Number of read-locking stress-test threads"); 50 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 51 torture_param(int, onoff_interval, 0, 52 "Time between CPU hotplugs (s), 0=disable"); 53 torture_param(int, shuffle_interval, 3, 54 "Number of jiffies between shuffles, 0=disable"); 55 torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable."); 56 torture_param(int, stat_interval, 60, 57 "Number of seconds between stats printk()s"); 58 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); 59 torture_param(bool, verbose, true, 60 "Enable verbose debugging printk()s"); 61 62 static char *torture_type = "spin_lock"; 63 module_param(torture_type, charp, 0444); 64 MODULE_PARM_DESC(torture_type, 65 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)"); 66 67 static struct task_struct *stats_task; 68 static struct task_struct **writer_tasks; 69 static struct task_struct **reader_tasks; 70 71 static bool lock_is_write_held; 72 static bool lock_is_read_held; 73 74 struct lock_stress_stats { 75 long n_lock_fail; 76 long n_lock_acquired; 77 }; 78 79 int torture_runnable = IS_ENABLED(MODULE); 80 module_param(torture_runnable, int, 0444); 81 MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init"); 82 83 /* Forward reference. */ 84 static void lock_torture_cleanup(void); 85 86 /* 87 * Operations vector for selecting different types of tests. 88 */ 89 struct lock_torture_ops { 90 void (*init)(void); 91 int (*writelock)(void); 92 void (*write_delay)(struct torture_random_state *trsp); 93 void (*task_boost)(struct torture_random_state *trsp); 94 void (*writeunlock)(void); 95 int (*readlock)(void); 96 void (*read_delay)(struct torture_random_state *trsp); 97 void (*readunlock)(void); 98 99 unsigned long flags; /* for irq spinlocks */ 100 const char *name; 101 }; 102 103 struct lock_torture_cxt { 104 int nrealwriters_stress; 105 int nrealreaders_stress; 106 bool debug_lock; 107 atomic_t n_lock_torture_errors; 108 struct lock_torture_ops *cur_ops; 109 struct lock_stress_stats *lwsa; /* writer statistics */ 110 struct lock_stress_stats *lrsa; /* reader statistics */ 111 }; 112 static struct lock_torture_cxt cxt = { 0, 0, false, 113 ATOMIC_INIT(0), 114 NULL, NULL}; 115 /* 116 * Definitions for lock torture testing. 117 */ 118 119 static int torture_lock_busted_write_lock(void) 120 { 121 return 0; /* BUGGY, do not use in real life!!! */ 122 } 123 124 static void torture_lock_busted_write_delay(struct torture_random_state *trsp) 125 { 126 const unsigned long longdelay_ms = 100; 127 128 /* We want a long delay occasionally to force massive contention. */ 129 if (!(torture_random(trsp) % 130 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 131 mdelay(longdelay_ms); 132 #ifdef CONFIG_PREEMPT 133 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 134 preempt_schedule(); /* Allow test to be preempted. */ 135 #endif 136 } 137 138 static void torture_lock_busted_write_unlock(void) 139 { 140 /* BUGGY, do not use in real life!!! */ 141 } 142 143 static void torture_boost_dummy(struct torture_random_state *trsp) 144 { 145 /* Only rtmutexes care about priority */ 146 } 147 148 static struct lock_torture_ops lock_busted_ops = { 149 .writelock = torture_lock_busted_write_lock, 150 .write_delay = torture_lock_busted_write_delay, 151 .task_boost = torture_boost_dummy, 152 .writeunlock = torture_lock_busted_write_unlock, 153 .readlock = NULL, 154 .read_delay = NULL, 155 .readunlock = NULL, 156 .name = "lock_busted" 157 }; 158 159 static DEFINE_SPINLOCK(torture_spinlock); 160 161 static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) 162 { 163 spin_lock(&torture_spinlock); 164 return 0; 165 } 166 167 static void torture_spin_lock_write_delay(struct torture_random_state *trsp) 168 { 169 const unsigned long shortdelay_us = 2; 170 const unsigned long longdelay_ms = 100; 171 172 /* We want a short delay mostly to emulate likely code, and 173 * we want a long delay occasionally to force massive contention. 174 */ 175 if (!(torture_random(trsp) % 176 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 177 mdelay(longdelay_ms); 178 if (!(torture_random(trsp) % 179 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 180 udelay(shortdelay_us); 181 #ifdef CONFIG_PREEMPT 182 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 183 preempt_schedule(); /* Allow test to be preempted. */ 184 #endif 185 } 186 187 static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) 188 { 189 spin_unlock(&torture_spinlock); 190 } 191 192 static struct lock_torture_ops spin_lock_ops = { 193 .writelock = torture_spin_lock_write_lock, 194 .write_delay = torture_spin_lock_write_delay, 195 .task_boost = torture_boost_dummy, 196 .writeunlock = torture_spin_lock_write_unlock, 197 .readlock = NULL, 198 .read_delay = NULL, 199 .readunlock = NULL, 200 .name = "spin_lock" 201 }; 202 203 static int torture_spin_lock_write_lock_irq(void) 204 __acquires(torture_spinlock) 205 { 206 unsigned long flags; 207 208 spin_lock_irqsave(&torture_spinlock, flags); 209 cxt.cur_ops->flags = flags; 210 return 0; 211 } 212 213 static void torture_lock_spin_write_unlock_irq(void) 214 __releases(torture_spinlock) 215 { 216 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); 217 } 218 219 static struct lock_torture_ops spin_lock_irq_ops = { 220 .writelock = torture_spin_lock_write_lock_irq, 221 .write_delay = torture_spin_lock_write_delay, 222 .task_boost = torture_boost_dummy, 223 .writeunlock = torture_lock_spin_write_unlock_irq, 224 .readlock = NULL, 225 .read_delay = NULL, 226 .readunlock = NULL, 227 .name = "spin_lock_irq" 228 }; 229 230 static DEFINE_RWLOCK(torture_rwlock); 231 232 static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) 233 { 234 write_lock(&torture_rwlock); 235 return 0; 236 } 237 238 static void torture_rwlock_write_delay(struct torture_random_state *trsp) 239 { 240 const unsigned long shortdelay_us = 2; 241 const unsigned long longdelay_ms = 100; 242 243 /* We want a short delay mostly to emulate likely code, and 244 * we want a long delay occasionally to force massive contention. 245 */ 246 if (!(torture_random(trsp) % 247 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 248 mdelay(longdelay_ms); 249 else 250 udelay(shortdelay_us); 251 } 252 253 static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) 254 { 255 write_unlock(&torture_rwlock); 256 } 257 258 static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) 259 { 260 read_lock(&torture_rwlock); 261 return 0; 262 } 263 264 static void torture_rwlock_read_delay(struct torture_random_state *trsp) 265 { 266 const unsigned long shortdelay_us = 10; 267 const unsigned long longdelay_ms = 100; 268 269 /* We want a short delay mostly to emulate likely code, and 270 * we want a long delay occasionally to force massive contention. 271 */ 272 if (!(torture_random(trsp) % 273 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 274 mdelay(longdelay_ms); 275 else 276 udelay(shortdelay_us); 277 } 278 279 static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) 280 { 281 read_unlock(&torture_rwlock); 282 } 283 284 static struct lock_torture_ops rw_lock_ops = { 285 .writelock = torture_rwlock_write_lock, 286 .write_delay = torture_rwlock_write_delay, 287 .task_boost = torture_boost_dummy, 288 .writeunlock = torture_rwlock_write_unlock, 289 .readlock = torture_rwlock_read_lock, 290 .read_delay = torture_rwlock_read_delay, 291 .readunlock = torture_rwlock_read_unlock, 292 .name = "rw_lock" 293 }; 294 295 static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) 296 { 297 unsigned long flags; 298 299 write_lock_irqsave(&torture_rwlock, flags); 300 cxt.cur_ops->flags = flags; 301 return 0; 302 } 303 304 static void torture_rwlock_write_unlock_irq(void) 305 __releases(torture_rwlock) 306 { 307 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 308 } 309 310 static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) 311 { 312 unsigned long flags; 313 314 read_lock_irqsave(&torture_rwlock, flags); 315 cxt.cur_ops->flags = flags; 316 return 0; 317 } 318 319 static void torture_rwlock_read_unlock_irq(void) 320 __releases(torture_rwlock) 321 { 322 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 323 } 324 325 static struct lock_torture_ops rw_lock_irq_ops = { 326 .writelock = torture_rwlock_write_lock_irq, 327 .write_delay = torture_rwlock_write_delay, 328 .task_boost = torture_boost_dummy, 329 .writeunlock = torture_rwlock_write_unlock_irq, 330 .readlock = torture_rwlock_read_lock_irq, 331 .read_delay = torture_rwlock_read_delay, 332 .readunlock = torture_rwlock_read_unlock_irq, 333 .name = "rw_lock_irq" 334 }; 335 336 static DEFINE_MUTEX(torture_mutex); 337 338 static int torture_mutex_lock(void) __acquires(torture_mutex) 339 { 340 mutex_lock(&torture_mutex); 341 return 0; 342 } 343 344 static void torture_mutex_delay(struct torture_random_state *trsp) 345 { 346 const unsigned long longdelay_ms = 100; 347 348 /* We want a long delay occasionally to force massive contention. */ 349 if (!(torture_random(trsp) % 350 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 351 mdelay(longdelay_ms * 5); 352 else 353 mdelay(longdelay_ms / 5); 354 #ifdef CONFIG_PREEMPT 355 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 356 preempt_schedule(); /* Allow test to be preempted. */ 357 #endif 358 } 359 360 static void torture_mutex_unlock(void) __releases(torture_mutex) 361 { 362 mutex_unlock(&torture_mutex); 363 } 364 365 static struct lock_torture_ops mutex_lock_ops = { 366 .writelock = torture_mutex_lock, 367 .write_delay = torture_mutex_delay, 368 .task_boost = torture_boost_dummy, 369 .writeunlock = torture_mutex_unlock, 370 .readlock = NULL, 371 .read_delay = NULL, 372 .readunlock = NULL, 373 .name = "mutex_lock" 374 }; 375 376 #include <linux/ww_mutex.h> 377 static DEFINE_WW_CLASS(torture_ww_class); 378 static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class); 379 static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class); 380 static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class); 381 382 static int torture_ww_mutex_lock(void) 383 __acquires(torture_ww_mutex_0) 384 __acquires(torture_ww_mutex_1) 385 __acquires(torture_ww_mutex_2) 386 { 387 LIST_HEAD(list); 388 struct reorder_lock { 389 struct list_head link; 390 struct ww_mutex *lock; 391 } locks[3], *ll, *ln; 392 struct ww_acquire_ctx ctx; 393 394 locks[0].lock = &torture_ww_mutex_0; 395 list_add(&locks[0].link, &list); 396 397 locks[1].lock = &torture_ww_mutex_1; 398 list_add(&locks[1].link, &list); 399 400 locks[2].lock = &torture_ww_mutex_2; 401 list_add(&locks[2].link, &list); 402 403 ww_acquire_init(&ctx, &torture_ww_class); 404 405 list_for_each_entry(ll, &list, link) { 406 int err; 407 408 err = ww_mutex_lock(ll->lock, &ctx); 409 if (!err) 410 continue; 411 412 ln = ll; 413 list_for_each_entry_continue_reverse(ln, &list, link) 414 ww_mutex_unlock(ln->lock); 415 416 if (err != -EDEADLK) 417 return err; 418 419 ww_mutex_lock_slow(ll->lock, &ctx); 420 list_move(&ll->link, &list); 421 } 422 423 ww_acquire_fini(&ctx); 424 return 0; 425 } 426 427 static void torture_ww_mutex_unlock(void) 428 __releases(torture_ww_mutex_0) 429 __releases(torture_ww_mutex_1) 430 __releases(torture_ww_mutex_2) 431 { 432 ww_mutex_unlock(&torture_ww_mutex_0); 433 ww_mutex_unlock(&torture_ww_mutex_1); 434 ww_mutex_unlock(&torture_ww_mutex_2); 435 } 436 437 static struct lock_torture_ops ww_mutex_lock_ops = { 438 .writelock = torture_ww_mutex_lock, 439 .write_delay = torture_mutex_delay, 440 .task_boost = torture_boost_dummy, 441 .writeunlock = torture_ww_mutex_unlock, 442 .readlock = NULL, 443 .read_delay = NULL, 444 .readunlock = NULL, 445 .name = "ww_mutex_lock" 446 }; 447 448 #ifdef CONFIG_RT_MUTEXES 449 static DEFINE_RT_MUTEX(torture_rtmutex); 450 451 static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) 452 { 453 rt_mutex_lock(&torture_rtmutex); 454 return 0; 455 } 456 457 static void torture_rtmutex_boost(struct torture_random_state *trsp) 458 { 459 int policy; 460 struct sched_param param; 461 const unsigned int factor = 50000; /* yes, quite arbitrary */ 462 463 if (!rt_task(current)) { 464 /* 465 * Boost priority once every ~50k operations. When the 466 * task tries to take the lock, the rtmutex it will account 467 * for the new priority, and do any corresponding pi-dance. 468 */ 469 if (trsp && !(torture_random(trsp) % 470 (cxt.nrealwriters_stress * factor))) { 471 policy = SCHED_FIFO; 472 param.sched_priority = MAX_RT_PRIO - 1; 473 } else /* common case, do nothing */ 474 return; 475 } else { 476 /* 477 * The task will remain boosted for another ~500k operations, 478 * then restored back to its original prio, and so forth. 479 * 480 * When @trsp is nil, we want to force-reset the task for 481 * stopping the kthread. 482 */ 483 if (!trsp || !(torture_random(trsp) % 484 (cxt.nrealwriters_stress * factor * 2))) { 485 policy = SCHED_NORMAL; 486 param.sched_priority = 0; 487 } else /* common case, do nothing */ 488 return; 489 } 490 491 sched_setscheduler_nocheck(current, policy, ¶m); 492 } 493 494 static void torture_rtmutex_delay(struct torture_random_state *trsp) 495 { 496 const unsigned long shortdelay_us = 2; 497 const unsigned long longdelay_ms = 100; 498 499 /* 500 * We want a short delay mostly to emulate likely code, and 501 * we want a long delay occasionally to force massive contention. 502 */ 503 if (!(torture_random(trsp) % 504 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 505 mdelay(longdelay_ms); 506 if (!(torture_random(trsp) % 507 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 508 udelay(shortdelay_us); 509 #ifdef CONFIG_PREEMPT 510 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 511 preempt_schedule(); /* Allow test to be preempted. */ 512 #endif 513 } 514 515 static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) 516 { 517 rt_mutex_unlock(&torture_rtmutex); 518 } 519 520 static struct lock_torture_ops rtmutex_lock_ops = { 521 .writelock = torture_rtmutex_lock, 522 .write_delay = torture_rtmutex_delay, 523 .task_boost = torture_rtmutex_boost, 524 .writeunlock = torture_rtmutex_unlock, 525 .readlock = NULL, 526 .read_delay = NULL, 527 .readunlock = NULL, 528 .name = "rtmutex_lock" 529 }; 530 #endif 531 532 static DECLARE_RWSEM(torture_rwsem); 533 static int torture_rwsem_down_write(void) __acquires(torture_rwsem) 534 { 535 down_write(&torture_rwsem); 536 return 0; 537 } 538 539 static void torture_rwsem_write_delay(struct torture_random_state *trsp) 540 { 541 const unsigned long longdelay_ms = 100; 542 543 /* We want a long delay occasionally to force massive contention. */ 544 if (!(torture_random(trsp) % 545 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 546 mdelay(longdelay_ms * 10); 547 else 548 mdelay(longdelay_ms / 10); 549 #ifdef CONFIG_PREEMPT 550 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 551 preempt_schedule(); /* Allow test to be preempted. */ 552 #endif 553 } 554 555 static void torture_rwsem_up_write(void) __releases(torture_rwsem) 556 { 557 up_write(&torture_rwsem); 558 } 559 560 static int torture_rwsem_down_read(void) __acquires(torture_rwsem) 561 { 562 down_read(&torture_rwsem); 563 return 0; 564 } 565 566 static void torture_rwsem_read_delay(struct torture_random_state *trsp) 567 { 568 const unsigned long longdelay_ms = 100; 569 570 /* We want a long delay occasionally to force massive contention. */ 571 if (!(torture_random(trsp) % 572 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 573 mdelay(longdelay_ms * 2); 574 else 575 mdelay(longdelay_ms / 2); 576 #ifdef CONFIG_PREEMPT 577 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000))) 578 preempt_schedule(); /* Allow test to be preempted. */ 579 #endif 580 } 581 582 static void torture_rwsem_up_read(void) __releases(torture_rwsem) 583 { 584 up_read(&torture_rwsem); 585 } 586 587 static struct lock_torture_ops rwsem_lock_ops = { 588 .writelock = torture_rwsem_down_write, 589 .write_delay = torture_rwsem_write_delay, 590 .task_boost = torture_boost_dummy, 591 .writeunlock = torture_rwsem_up_write, 592 .readlock = torture_rwsem_down_read, 593 .read_delay = torture_rwsem_read_delay, 594 .readunlock = torture_rwsem_up_read, 595 .name = "rwsem_lock" 596 }; 597 598 #include <linux/percpu-rwsem.h> 599 static struct percpu_rw_semaphore pcpu_rwsem; 600 601 void torture_percpu_rwsem_init(void) 602 { 603 BUG_ON(percpu_init_rwsem(&pcpu_rwsem)); 604 } 605 606 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) 607 { 608 percpu_down_write(&pcpu_rwsem); 609 return 0; 610 } 611 612 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) 613 { 614 percpu_up_write(&pcpu_rwsem); 615 } 616 617 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) 618 { 619 percpu_down_read(&pcpu_rwsem); 620 return 0; 621 } 622 623 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) 624 { 625 percpu_up_read(&pcpu_rwsem); 626 } 627 628 static struct lock_torture_ops percpu_rwsem_lock_ops = { 629 .init = torture_percpu_rwsem_init, 630 .writelock = torture_percpu_rwsem_down_write, 631 .write_delay = torture_rwsem_write_delay, 632 .task_boost = torture_boost_dummy, 633 .writeunlock = torture_percpu_rwsem_up_write, 634 .readlock = torture_percpu_rwsem_down_read, 635 .read_delay = torture_rwsem_read_delay, 636 .readunlock = torture_percpu_rwsem_up_read, 637 .name = "percpu_rwsem_lock" 638 }; 639 640 /* 641 * Lock torture writer kthread. Repeatedly acquires and releases 642 * the lock, checking for duplicate acquisitions. 643 */ 644 static int lock_torture_writer(void *arg) 645 { 646 struct lock_stress_stats *lwsp = arg; 647 static DEFINE_TORTURE_RANDOM(rand); 648 649 VERBOSE_TOROUT_STRING("lock_torture_writer task started"); 650 set_user_nice(current, MAX_NICE); 651 652 do { 653 if ((torture_random(&rand) & 0xfffff) == 0) 654 schedule_timeout_uninterruptible(1); 655 656 cxt.cur_ops->task_boost(&rand); 657 cxt.cur_ops->writelock(); 658 if (WARN_ON_ONCE(lock_is_write_held)) 659 lwsp->n_lock_fail++; 660 lock_is_write_held = 1; 661 if (WARN_ON_ONCE(lock_is_read_held)) 662 lwsp->n_lock_fail++; /* rare, but... */ 663 664 lwsp->n_lock_acquired++; 665 cxt.cur_ops->write_delay(&rand); 666 lock_is_write_held = 0; 667 cxt.cur_ops->writeunlock(); 668 669 stutter_wait("lock_torture_writer"); 670 } while (!torture_must_stop()); 671 672 cxt.cur_ops->task_boost(NULL); /* reset prio */ 673 torture_kthread_stopping("lock_torture_writer"); 674 return 0; 675 } 676 677 /* 678 * Lock torture reader kthread. Repeatedly acquires and releases 679 * the reader lock. 680 */ 681 static int lock_torture_reader(void *arg) 682 { 683 struct lock_stress_stats *lrsp = arg; 684 static DEFINE_TORTURE_RANDOM(rand); 685 686 VERBOSE_TOROUT_STRING("lock_torture_reader task started"); 687 set_user_nice(current, MAX_NICE); 688 689 do { 690 if ((torture_random(&rand) & 0xfffff) == 0) 691 schedule_timeout_uninterruptible(1); 692 693 cxt.cur_ops->readlock(); 694 lock_is_read_held = 1; 695 if (WARN_ON_ONCE(lock_is_write_held)) 696 lrsp->n_lock_fail++; /* rare, but... */ 697 698 lrsp->n_lock_acquired++; 699 cxt.cur_ops->read_delay(&rand); 700 lock_is_read_held = 0; 701 cxt.cur_ops->readunlock(); 702 703 stutter_wait("lock_torture_reader"); 704 } while (!torture_must_stop()); 705 torture_kthread_stopping("lock_torture_reader"); 706 return 0; 707 } 708 709 /* 710 * Create an lock-torture-statistics message in the specified buffer. 711 */ 712 static void __torture_print_stats(char *page, 713 struct lock_stress_stats *statp, bool write) 714 { 715 bool fail = 0; 716 int i, n_stress; 717 long max = 0; 718 long min = statp[0].n_lock_acquired; 719 long long sum = 0; 720 721 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; 722 for (i = 0; i < n_stress; i++) { 723 if (statp[i].n_lock_fail) 724 fail = true; 725 sum += statp[i].n_lock_acquired; 726 if (max < statp[i].n_lock_fail) 727 max = statp[i].n_lock_fail; 728 if (min > statp[i].n_lock_fail) 729 min = statp[i].n_lock_fail; 730 } 731 page += sprintf(page, 732 "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", 733 write ? "Writes" : "Reads ", 734 sum, max, min, max / 2 > min ? "???" : "", 735 fail, fail ? "!!!" : ""); 736 if (fail) 737 atomic_inc(&cxt.n_lock_torture_errors); 738 } 739 740 /* 741 * Print torture statistics. Caller must ensure that there is only one 742 * call to this function at a given time!!! This is normally accomplished 743 * by relying on the module system to only have one copy of the module 744 * loaded, and then by giving the lock_torture_stats kthread full control 745 * (or the init/cleanup functions when lock_torture_stats thread is not 746 * running). 747 */ 748 static void lock_torture_stats_print(void) 749 { 750 int size = cxt.nrealwriters_stress * 200 + 8192; 751 char *buf; 752 753 if (cxt.cur_ops->readlock) 754 size += cxt.nrealreaders_stress * 200 + 8192; 755 756 buf = kmalloc(size, GFP_KERNEL); 757 if (!buf) { 758 pr_err("lock_torture_stats_print: Out of memory, need: %d", 759 size); 760 return; 761 } 762 763 __torture_print_stats(buf, cxt.lwsa, true); 764 pr_alert("%s", buf); 765 kfree(buf); 766 767 if (cxt.cur_ops->readlock) { 768 buf = kmalloc(size, GFP_KERNEL); 769 if (!buf) { 770 pr_err("lock_torture_stats_print: Out of memory, need: %d", 771 size); 772 return; 773 } 774 775 __torture_print_stats(buf, cxt.lrsa, false); 776 pr_alert("%s", buf); 777 kfree(buf); 778 } 779 } 780 781 /* 782 * Periodically prints torture statistics, if periodic statistics printing 783 * was specified via the stat_interval module parameter. 784 * 785 * No need to worry about fullstop here, since this one doesn't reference 786 * volatile state or register callbacks. 787 */ 788 static int lock_torture_stats(void *arg) 789 { 790 VERBOSE_TOROUT_STRING("lock_torture_stats task started"); 791 do { 792 schedule_timeout_interruptible(stat_interval * HZ); 793 lock_torture_stats_print(); 794 torture_shutdown_absorb("lock_torture_stats"); 795 } while (!torture_must_stop()); 796 torture_kthread_stopping("lock_torture_stats"); 797 return 0; 798 } 799 800 static inline void 801 lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, 802 const char *tag) 803 { 804 pr_alert("%s" TORTURE_FLAG 805 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n", 806 torture_type, tag, cxt.debug_lock ? " [debug]": "", 807 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval, 808 verbose, shuffle_interval, stutter, shutdown_secs, 809 onoff_interval, onoff_holdoff); 810 } 811 812 static void lock_torture_cleanup(void) 813 { 814 int i; 815 816 if (torture_cleanup_begin()) 817 return; 818 819 /* 820 * Indicates early cleanup, meaning that the test has not run, 821 * such as when passing bogus args when loading the module. As 822 * such, only perform the underlying torture-specific cleanups, 823 * and avoid anything related to locktorture. 824 */ 825 if (!cxt.lwsa) 826 goto end; 827 828 if (writer_tasks) { 829 for (i = 0; i < cxt.nrealwriters_stress; i++) 830 torture_stop_kthread(lock_torture_writer, 831 writer_tasks[i]); 832 kfree(writer_tasks); 833 writer_tasks = NULL; 834 } 835 836 if (reader_tasks) { 837 for (i = 0; i < cxt.nrealreaders_stress; i++) 838 torture_stop_kthread(lock_torture_reader, 839 reader_tasks[i]); 840 kfree(reader_tasks); 841 reader_tasks = NULL; 842 } 843 844 torture_stop_kthread(lock_torture_stats, stats_task); 845 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ 846 847 if (atomic_read(&cxt.n_lock_torture_errors)) 848 lock_torture_print_module_parms(cxt.cur_ops, 849 "End of test: FAILURE"); 850 else if (torture_onoff_failures()) 851 lock_torture_print_module_parms(cxt.cur_ops, 852 "End of test: LOCK_HOTPLUG"); 853 else 854 lock_torture_print_module_parms(cxt.cur_ops, 855 "End of test: SUCCESS"); 856 857 kfree(cxt.lwsa); 858 kfree(cxt.lrsa); 859 860 end: 861 torture_cleanup_end(); 862 } 863 864 static int __init lock_torture_init(void) 865 { 866 int i, j; 867 int firsterr = 0; 868 static struct lock_torture_ops *torture_ops[] = { 869 &lock_busted_ops, 870 &spin_lock_ops, &spin_lock_irq_ops, 871 &rw_lock_ops, &rw_lock_irq_ops, 872 &mutex_lock_ops, 873 &ww_mutex_lock_ops, 874 #ifdef CONFIG_RT_MUTEXES 875 &rtmutex_lock_ops, 876 #endif 877 &rwsem_lock_ops, 878 &percpu_rwsem_lock_ops, 879 }; 880 881 if (!torture_init_begin(torture_type, verbose, &torture_runnable)) 882 return -EBUSY; 883 884 /* Process args and tell the world that the torturer is on the job. */ 885 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 886 cxt.cur_ops = torture_ops[i]; 887 if (strcmp(torture_type, cxt.cur_ops->name) == 0) 888 break; 889 } 890 if (i == ARRAY_SIZE(torture_ops)) { 891 pr_alert("lock-torture: invalid torture type: \"%s\"\n", 892 torture_type); 893 pr_alert("lock-torture types:"); 894 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 895 pr_alert(" %s", torture_ops[i]->name); 896 pr_alert("\n"); 897 firsterr = -EINVAL; 898 goto unwind; 899 } 900 if (cxt.cur_ops->init) 901 cxt.cur_ops->init(); 902 903 if (nwriters_stress >= 0) 904 cxt.nrealwriters_stress = nwriters_stress; 905 else 906 cxt.nrealwriters_stress = 2 * num_online_cpus(); 907 908 #ifdef CONFIG_DEBUG_MUTEXES 909 if (strncmp(torture_type, "mutex", 5) == 0) 910 cxt.debug_lock = true; 911 #endif 912 #ifdef CONFIG_DEBUG_RT_MUTEXES 913 if (strncmp(torture_type, "rtmutex", 7) == 0) 914 cxt.debug_lock = true; 915 #endif 916 #ifdef CONFIG_DEBUG_SPINLOCK 917 if ((strncmp(torture_type, "spin", 4) == 0) || 918 (strncmp(torture_type, "rw_lock", 7) == 0)) 919 cxt.debug_lock = true; 920 #endif 921 922 /* Initialize the statistics so that each run gets its own numbers. */ 923 924 lock_is_write_held = 0; 925 cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL); 926 if (cxt.lwsa == NULL) { 927 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); 928 firsterr = -ENOMEM; 929 goto unwind; 930 } 931 for (i = 0; i < cxt.nrealwriters_stress; i++) { 932 cxt.lwsa[i].n_lock_fail = 0; 933 cxt.lwsa[i].n_lock_acquired = 0; 934 } 935 936 if (cxt.cur_ops->readlock) { 937 if (nreaders_stress >= 0) 938 cxt.nrealreaders_stress = nreaders_stress; 939 else { 940 /* 941 * By default distribute evenly the number of 942 * readers and writers. We still run the same number 943 * of threads as the writer-only locks default. 944 */ 945 if (nwriters_stress < 0) /* user doesn't care */ 946 cxt.nrealwriters_stress = num_online_cpus(); 947 cxt.nrealreaders_stress = cxt.nrealwriters_stress; 948 } 949 950 lock_is_read_held = 0; 951 cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL); 952 if (cxt.lrsa == NULL) { 953 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); 954 firsterr = -ENOMEM; 955 kfree(cxt.lwsa); 956 cxt.lwsa = NULL; 957 goto unwind; 958 } 959 960 for (i = 0; i < cxt.nrealreaders_stress; i++) { 961 cxt.lrsa[i].n_lock_fail = 0; 962 cxt.lrsa[i].n_lock_acquired = 0; 963 } 964 } 965 966 lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); 967 968 /* Prepare torture context. */ 969 if (onoff_interval > 0) { 970 firsterr = torture_onoff_init(onoff_holdoff * HZ, 971 onoff_interval * HZ); 972 if (firsterr) 973 goto unwind; 974 } 975 if (shuffle_interval > 0) { 976 firsterr = torture_shuffle_init(shuffle_interval); 977 if (firsterr) 978 goto unwind; 979 } 980 if (shutdown_secs > 0) { 981 firsterr = torture_shutdown_init(shutdown_secs, 982 lock_torture_cleanup); 983 if (firsterr) 984 goto unwind; 985 } 986 if (stutter > 0) { 987 firsterr = torture_stutter_init(stutter); 988 if (firsterr) 989 goto unwind; 990 } 991 992 writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]), 993 GFP_KERNEL); 994 if (writer_tasks == NULL) { 995 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); 996 firsterr = -ENOMEM; 997 goto unwind; 998 } 999 1000 if (cxt.cur_ops->readlock) { 1001 reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]), 1002 GFP_KERNEL); 1003 if (reader_tasks == NULL) { 1004 VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory"); 1005 kfree(writer_tasks); 1006 writer_tasks = NULL; 1007 firsterr = -ENOMEM; 1008 goto unwind; 1009 } 1010 } 1011 1012 /* 1013 * Create the kthreads and start torturing (oh, those poor little locks). 1014 * 1015 * TODO: Note that we interleave writers with readers, giving writers a 1016 * slight advantage, by creating its kthread first. This can be modified 1017 * for very specific needs, or even let the user choose the policy, if 1018 * ever wanted. 1019 */ 1020 for (i = 0, j = 0; i < cxt.nrealwriters_stress || 1021 j < cxt.nrealreaders_stress; i++, j++) { 1022 if (i >= cxt.nrealwriters_stress) 1023 goto create_reader; 1024 1025 /* Create writer. */ 1026 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], 1027 writer_tasks[i]); 1028 if (firsterr) 1029 goto unwind; 1030 1031 create_reader: 1032 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) 1033 continue; 1034 /* Create reader. */ 1035 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], 1036 reader_tasks[j]); 1037 if (firsterr) 1038 goto unwind; 1039 } 1040 if (stat_interval > 0) { 1041 firsterr = torture_create_kthread(lock_torture_stats, NULL, 1042 stats_task); 1043 if (firsterr) 1044 goto unwind; 1045 } 1046 torture_init_end(); 1047 return 0; 1048 1049 unwind: 1050 torture_init_end(); 1051 lock_torture_cleanup(); 1052 return firsterr; 1053 } 1054 1055 module_init(lock_torture_init); 1056 module_exit(lock_torture_cleanup); 1057