/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, ...)");

static atomic_t n_lock_torture_errors;

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;

static int nrealwriters_stress;
static bool lock_is_write_held;

struct lock_writer_stress_stats {
	long n_write_lock_fail;
	long n_write_lock_acquired;
};
static struct lock_writer_stress_stats *lwsa;

#if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(locktorture_runnable, int, 0444);
MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	unsigned long flags;
	const char *name;
};

static struct lock_torture_ops *cur_ops;

/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_us = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.writeunlock = torture_lock_busted_write_unlock,
	.name = "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_us = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2000 * longdelay_us)))
		mdelay(longdelay_us);
	if (!(torture_random(trsp) %
	      (nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.writeunlock = torture_spin_lock_write_unlock,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.name = "spin_lock_irq"
};
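/*
 * Illustrative sketch only: a sleeping lock could be plugged into the
 * same operations vector, for example via a mutex as below.  This is a
 * hypothetical example, guarded by #if 0 so that it is not built; to
 * make it selectable it would also need an entry in the torture_ops[]
 * array in lock_torture_init(), an #include <linux/mutex.h> if that is
 * not already pulled in indirectly, and ideally its own write_delay()
 * implementation instead of reusing the spinlock one.
 */
#if 0
static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock = torture_mutex_lock,
	.write_delay = torture_spin_lock_write_delay,
	.writeunlock = torture_mutex_unlock,
	.name = "mutex_lock"
};
#endif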
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_writer_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_write_lock_fail++;
		lock_is_write_held = 1;
		lwsp->n_write_lock_acquired++;
		cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cur_ops->writeunlock();
		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void lock_torture_printk(char *page)
{
	bool fail = 0;
	int i;
	long max = 0;
	long min = lwsa[0].n_write_lock_acquired;
	long long sum = 0;

	for (i = 0; i < nrealwriters_stress; i++) {
		if (lwsa[i].n_write_lock_fail)
			fail = true;
		sum += lwsa[i].n_write_lock_acquired;
		if (max < lwsa[i].n_write_lock_acquired)
			max = lwsa[i].n_write_lock_acquired;
		if (min > lwsa[i].n_write_lock_acquired)
			min = lwsa[i].n_write_lock_acquired;
	}
	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page,
			"Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = nrealwriters_stress * 200 + 8192;
	char *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
		       size);
		return;
	}
	lock_torture_printk(buf);
	pr_alert("%s", buf);
	kfree(buf);
}
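/*
 * For reference, the line emitted by lock_torture_printk() above looks
 * roughly like the following (the numbers are purely illustrative, and
 * TORTURE_FLAG is assumed here to expand to "-torture:"):
 *
 *	spin_lock-torture: Writes: Total: 94439 Max/Min: 12102/10861  Fail: 0
 */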
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealwriters_stress, stat_interval, verbose,
		 shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup())
		return;

	if (writer_tasks) {
		for (i = 0; i < nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_lock_torture_errors))
		lock_torture_print_module_parms(cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cur_ops,
						"End of test: SUCCESS");
}

static int __init lock_torture_init(void)
{
	int i;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
	};

	torture_init_begin(torture_type, verbose, &locktorture_runnable);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		torture_init_end();
		return -EINVAL;
	}
	if (cur_ops->init)
		cur_ops->init();  /* no "goto unwind" prior to this point!!! */

	if (nwriters_stress >= 0)
		nrealwriters_stress = nwriters_stress;
	else
		nrealwriters_stress = 2 * num_online_cpus();
	lock_torture_print_module_parms(cur_ops, "Start of test");

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = 0;
	lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
	if (lwsa == NULL) {
		VERBOSE_TOROUT_STRING("lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters_stress; i++) {
		lwsa[i].n_write_lock_fail = 0;
		lwsa[i].n_write_lock_acquired = 0;
	}

	/* Start up the kthreads. */

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters_stress; i++) {
		firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
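
/*
 * Example usage (assuming the module is built as locktorture.ko; the
 * parameter values below are purely illustrative):
 *
 *	modprobe locktorture torture_type=spin_lock_irq nwriters_stress=4 \
 *		stat_interval=30 verbose=1
 *
 * When the test is built into the kernel, the same parameters can be
 * passed on the boot command line with a "locktorture." prefix, e.g.
 * locktorture.torture_type=spin_lock, with locktorture_runnable
 * controlling whether the test starts running at boot.
 */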