/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	      "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	      "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	      "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	      "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
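 * Each lock flavor supplies its own lock, unlock, and delay callbacks;
 * the read-side hooks (readlock, read_delay, readunlock) are left NULL
 * for exclusive-only primitives, in which case no reader kthreads are
 * created.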
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);
	unsigned long flags;
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_us = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_us = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
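	/*
	 * Scaling the divisor above by cxt.nrealwriters_stress keeps the
	 * system-wide rate of these voluntary preemption points roughly
	 * constant as the number of writer kthreads grows.
	 */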
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
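	/* Note: the delay probability below scales with the writer count,
	 * as in the write-side delay functions. */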
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		cxt.cur_ops->writeunlock();
		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);
		cxt.cur_ops->readlock();
		lock_is_read_held = true;
		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = false;
		cxt.cur_ops->readunlock();
		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0;
	long min = statp[0].n_lock_acquired;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
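
/*
 * The stress kthreads update their counters without synchronization, so
 * the sums and extrema computed above are approximate snapshots rather
 * than exact values.
 */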

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when the lock_torture_stats thread is
 * not running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * As such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */
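	/*
	 * This final print honors the single-caller rule documented above
	 * lock_torture_stats_print(): the stats kthread has already been
	 * stopped, so cleanup now owns the statistics machinery.
	 */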

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
		&mutex_lock_ops, &rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		torture_init_end();
		return -EINVAL;
	}
	if (cxt.cur_ops->init)
		cxt.cur_ops->init();  /* no "goto unwind" prior to this point!!! */

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if (strncmp(torture_type, "spin", 4) == 0)
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = false;
	cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress,
			   GFP_KERNEL);
	if (cxt.lwsa == NULL) {
		VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < cxt.nrealwriters_stress; i++) {
		cxt.lwsa[i].n_lock_fail = 0;
		cxt.lwsa[i].n_lock_acquired = 0;
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the number of readers and
			 * writers evenly.  We still run the same total
			 * number of threads as the writer-only lock types do.
			 */
			if (nwriters_stress < 0)  /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		lock_is_read_held = false;
		cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress,
				   GFP_KERNEL);
		if (cxt.lrsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
			firsterr = -ENOMEM;
			kfree(cxt.lwsa);
			cxt.lwsa = NULL;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealreaders_stress; i++) {
			cxt.lrsa[i].n_lock_fail = 0;
			cxt.lrsa[i].n_lock_acquired = 0;
		}
	}
	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
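	/*
	 * Each helper below is optional and keyed off its module parameter;
	 * any nonzero return value unwinds through lock_torture_cleanup().
	 */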
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first.  This can be
	 * modified for very specific needs, or we could even let the user
	 * choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer,
						  &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL ||
		    (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader,
						  &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
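
/*
 * Example usage, assuming the module is built as locktorture.ko (the
 * parameter values below are illustrative, not recommendations):
 *
 *	modprobe locktorture torture_type=rwsem_lock \
 *		nwriters_stress=4 nreaders_stress=4 stat_interval=30
 *
 * "Writes:" statistics lines are then printk()ed every stat_interval
 * seconds; "Reads :" lines appear only for torture_type values that
 * provide read-side callbacks (currently rwsem_lock).
 */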