// SPDX-License-Identifier: GPL-2.0+
/*
 * Common functions for in-kernel torture tests.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include "rcu/rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

static bool disable_onoff_at_boot;
module_param(disable_onoff_at_boot, bool, 0444);

static char *torture_type;
static int verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  Only present if CPU hotplug
 * is enabled, otherwise does nothing.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

/*
 * Attempt to take a CPU offline.  Return false if the CPU is already
 * offline or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
		     unsigned long *sum_offl, int *min_offl, int *max_offl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;
	if (num_online_cpus() <= 1)
		return false;  /* Can't offline the last CPU. */

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: offlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_offl_attempts)++;
	ret = remove_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_offl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offline %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offlined %d\n",
				 torture_type, cpu);
		if (onoff_f)
			onoff_f();
		(*n_offl_successes)++;
		delta = jiffies - starttime;
		*sum_offl += delta;
		if (*min_offl < 0) {
			*min_offl = delta;
			*max_offl = delta;
		}
		if (*min_offl > delta)
			*min_offl = delta;
		if (*max_offl < delta)
			*max_offl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_offline);

/*
 * Attempt to bring a CPU online.  Return false if the CPU is already
 * online or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
		    unsigned long *sum_onl, int *min_onl, int *max_onl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: onlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_onl_attempts)++;
	ret = add_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_onl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: online %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: onlined %d\n",
				 torture_type, cpu);
		(*n_onl_successes)++;
		delta = jiffies - starttime;
		*sum_onl += delta;
		if (*min_onl < 0) {
			*min_onl = delta;
			*max_onl = delta;
		}
		if (*min_onl > delta)
			*min_onl = delta;
		if (*max_onl < delta)
			*max_onl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_online);

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (!IS_MODULE(CONFIG_TORTURE_TEST)) {
		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			ret = add_cpu(cpu);
			if (ret && verbose) {
				pr_alert("%s" TORTURE_FLAG
					 "%s: Initial online %d: errno %d\n",
					 __func__, torture_type, cpu, ret);
			}
		}
	}

	if (maxcpu == 0) {
		VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
		goto stop;
	}

	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
			schedule_timeout_interruptible(HZ / 10);
			continue;
		}
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (!torture_offline(cpu,
				     &n_offline_attempts, &n_offline_successes,
				     &sum_offline, &min_offline, &max_offline))
			torture_online(cpu,
				       &n_online_attempts, &n_online_successes,
				       &sum_online, &min_online, &max_online);
		schedule_timeout_interruptible(onoff_interval);
	}

stop:
	torture_kthread_stopping("torture_onoff");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	onoff_f = f;
	if (onoff_interval <= 0)
		return 0;
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
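
/*
 * Illustrative sketch (not part of this file): a client torture module
 * would typically wire its own "onoff_holdoff" and "onoff_interval"
 * module parameters (hypothetical names here) into the call above,
 * converting seconds to jiffies:
 *
 *	firsterr = torture_onoff_init(onoff_holdoff * HZ,
 *				      onoff_interval * HZ, NULL);
 *	if (firsterr)
 *		goto unwind;
 *
 * Passing NULL for the torture_ofl_func pointer simply skips the
 * post-offline callback.
 */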
/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}

/*
 * Print online/offline testing statistics.
 */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);

/*
 * Were all the online/offline operations successful?
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from local_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
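
/*
 * Usage sketch: callers typically place a torture_random_state on the
 * stack via DEFINE_TORTURE_RANDOM() and reduce the result modulo the
 * desired range, as torture_onoff() does above:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *	...
 *	cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
 *
 * The low-order bits of an LCG are the least random, hence the right
 * shift before taking the modulus.
 */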
/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};

static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and cut off its timer ticks.  This is meant
 * to test RCU's support for such tickless idle CPUs.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;

	shuffle_idle_cpu = -1;

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);

/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static struct task_struct *shutdown_task;
static ktime_t shutdown_time;		/* time to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shut down the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	ktime_t ktime_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	ktime_snap = ktime_get();
	while (ktime_before(ktime_snap, shutdown_time) &&
	       !torture_must_stop()) {
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %llu ms remaining\n",
				 torture_type,
				 ktime_ms_delta(shutdown_time, ktime_snap));
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&shutdown_time, HRTIMER_MODE_ABS);
		ktime_snap = ktime_get();
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	rcu_ftrace_dump(DUMP_ALL);
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	torture_shutdown_hook = cleanup;
	if (ssecs > 0) {
		shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
		return torture_create_kthread(torture_shutdown, NULL,
					      shutdown_task);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
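
/*
 * Illustrative sketch (assumed client code): a module with a
 * "shutdown_secs" parameter (hypothetical name) could arrange for a
 * fully scripted run that powers the system off when done:
 *
 *	firsterr = torture_shutdown_init(shutdown_secs, my_cleanup_fn);
 *	if (firsterr)
 *		goto unwind;
 *
 * where my_cleanup_fn() is a hypothetical void(void) hook invoked just
 * before kernel_power_off().
 */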
/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;
static int stutter_gap;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
bool stutter_wait(const char *title)
{
	int spt;
	bool ret = false;

	cond_resched_tasks_rcu_qs();
	spt = READ_ONCE(stutter_pause_test);
	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
		ret = true;
		if (spt == 1) {
			schedule_timeout_interruptible(1);
		} else if (spt == 2) {
			while (READ_ONCE(stutter_pause_test))
				cond_resched();
		} else {
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		}
		torture_shutdown_absorb(title);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);
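
/*
 * Usage sketch (hypothetical loop body): a torture kthread's main loop
 * calls stutter_wait() once per pass so that the torture_stutter task
 * below can pause it:
 *
 *	do {
 *		do_one_torture_op();	// hypothetical per-pass work
 *		stutter_wait("my_torture_reader");
 *	} while (!torture_must_stop());
 *	torture_kthread_stopping("my_torture_reader");
 *
 * The return value reports whether the caller was in fact paused.
 */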
/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	int wtime;

	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop() && stutter > 1) {
			wtime = stutter;
			if (stutter > HZ + 1) {
				WRITE_ONCE(stutter_pause_test, 1);
				wtime = stutter - HZ - 1;
				schedule_timeout_interruptible(wtime);
				wtime = HZ + 1;
			}
			WRITE_ONCE(stutter_pause_test, 2);
			schedule_timeout_interruptible(wtime);
		}
		WRITE_ONCE(stutter_pause_test, 0);
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter_gap);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(const int s, const int sgap)
{
	stutter = s;
	stutter_gap = sgap;
	return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);

/*
 * Cleanup after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 */
bool torture_init_begin(char *ttype, int v)
{
	mutex_lock(&fullstop_mutex);
	if (torture_type != NULL) {
		pr_alert("torture_init_begin: Refusing %s init: %s running.\n",
			 ttype, torture_type);
		pr_alert("torture_init_begin: One torture test at a time!\n");
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	fullstop = FULLSTOP_DONTSTOP;
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);

/*
 * Tell the torture module that initialization is complete.
 */
void torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
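
/*
 * Illustrative sketch (assumed client code): the init function of a
 * client torture module brackets its setup with these two calls, where
 * "mytorture" and the verbose variable are hypothetical:
 *
 *	if (!torture_init_begin("mytorture", verbose))
 *		return -EBUSY;
 *	... create kthreads, register callbacks ...
 *	torture_init_end();
 *	return 0;
 *
 * fullstop_mutex is held between the two calls, which is what excludes
 * concurrent initialization and shutdown notification.
 */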
/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected, otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 *
 * Both torture_cleanup_begin() and torture_cleanup_end() must be paired,
 * in order to correctly perform the cleanup.  They are separated because
 * client kthreads may still need to reference torture_type, so it is set
 * to NULL only after all other cleanup has completed.
 */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);

void torture_cleanup_end(void)
{
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "Stopping %s", title);
	VERBOSE_TOROUT_STRING(buf);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
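
/*
 * Usage sketch: clients normally reach this function through the
 * torture_create_kthread() wrapper macro in linux/torture.h, which
 * supplies the message strings, as the calls above do:
 *
 *	ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
 *
 * On failure the task pointer is NULLed and the kthread_run() errno is
 * returned; torture_shuffle_task_register() then declines to register
 * the NULL pointer (with a WARN_ON_ONCE() splat).
 */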
/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);